linux/arch/powerpc/kvm/book3s_hv_nested.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright IBM Corporation, 2018
   4 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
   5 *         Paul Mackerras <paulus@ozlabs.org>
   6 *
   7 * Description: KVM functions specific to running nested KVM-HV guests
   8 * on Book3S processors (specifically POWER9 and later).
   9 */
  10
  11#include <linux/kernel.h>
  12#include <linux/kvm_host.h>
  13#include <linux/llist.h>
  14#include <linux/pgtable.h>
  15
  16#include <asm/kvm_ppc.h>
  17#include <asm/kvm_book3s.h>
  18#include <asm/mmu.h>
  19#include <asm/pgalloc.h>
  20#include <asm/pte-walk.h>
  21#include <asm/reg.h>
  22#include <asm/plpar_wrappers.h>
  23
  24static struct patb_entry *pseries_partition_tb;
  25
  26static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
  27static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);
  28
  29void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
  30{
  31        struct kvmppc_vcore *vc = vcpu->arch.vcore;
  32
  33        hr->pcr = vc->pcr | PCR_MASK;
  34        hr->dpdes = vc->dpdes;
  35        hr->hfscr = vcpu->arch.hfscr;
  36        hr->tb_offset = vc->tb_offset;
  37        hr->dawr0 = vcpu->arch.dawr0;
  38        hr->dawrx0 = vcpu->arch.dawrx0;
  39        hr->ciabr = vcpu->arch.ciabr;
  40        hr->purr = vcpu->arch.purr;
  41        hr->spurr = vcpu->arch.spurr;
  42        hr->ic = vcpu->arch.ic;
  43        hr->vtb = vc->vtb;
  44        hr->srr0 = vcpu->arch.shregs.srr0;
  45        hr->srr1 = vcpu->arch.shregs.srr1;
  46        hr->sprg[0] = vcpu->arch.shregs.sprg0;
  47        hr->sprg[1] = vcpu->arch.shregs.sprg1;
  48        hr->sprg[2] = vcpu->arch.shregs.sprg2;
  49        hr->sprg[3] = vcpu->arch.shregs.sprg3;
  50        hr->pidr = vcpu->arch.pid;
  51        hr->cfar = vcpu->arch.cfar;
  52        hr->ppr = vcpu->arch.ppr;
  53        hr->dawr1 = vcpu->arch.dawr1;
  54        hr->dawrx1 = vcpu->arch.dawrx1;
  55}
  56
  57/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
  58static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
  59{
  60        unsigned long *addr = (unsigned long *) regs;
  61
  62        for (; addr < ((unsigned long *) (regs + 1)); addr++)
  63                *addr = swab64(*addr);
  64}
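     /*
      * Note: swapping pt_regs one long at a time is valid only because, on
      * ppc64, every field of struct pt_regs is a 64-bit quantity.  struct
      * hv_guest_state mixes 32-bit and 64-bit fields, so it gets the
      * explicit per-field treatment below.
      */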
  65
  66static void byteswap_hv_regs(struct hv_guest_state *hr)
  67{
  68        hr->version = swab64(hr->version);
  69        hr->lpid = swab32(hr->lpid);
  70        hr->vcpu_token = swab32(hr->vcpu_token);
  71        hr->lpcr = swab64(hr->lpcr);
  72        hr->pcr = swab64(hr->pcr) | PCR_MASK;
  73        hr->amor = swab64(hr->amor);
  74        hr->dpdes = swab64(hr->dpdes);
  75        hr->hfscr = swab64(hr->hfscr);
  76        hr->tb_offset = swab64(hr->tb_offset);
  77        hr->dawr0 = swab64(hr->dawr0);
  78        hr->dawrx0 = swab64(hr->dawrx0);
  79        hr->ciabr = swab64(hr->ciabr);
  80        hr->hdec_expiry = swab64(hr->hdec_expiry);
  81        hr->purr = swab64(hr->purr);
  82        hr->spurr = swab64(hr->spurr);
  83        hr->ic = swab64(hr->ic);
  84        hr->vtb = swab64(hr->vtb);
  85        hr->hdar = swab64(hr->hdar);
  86        hr->hdsisr = swab64(hr->hdsisr);
  87        hr->heir = swab64(hr->heir);
  88        hr->asdr = swab64(hr->asdr);
  89        hr->srr0 = swab64(hr->srr0);
  90        hr->srr1 = swab64(hr->srr1);
  91        hr->sprg[0] = swab64(hr->sprg[0]);
  92        hr->sprg[1] = swab64(hr->sprg[1]);
  93        hr->sprg[2] = swab64(hr->sprg[2]);
  94        hr->sprg[3] = swab64(hr->sprg[3]);
  95        hr->pidr = swab64(hr->pidr);
  96        hr->cfar = swab64(hr->cfar);
  97        hr->ppr = swab64(hr->ppr);
  98        hr->dawr1 = swab64(hr->dawr1);
  99        hr->dawrx1 = swab64(hr->dawrx1);
 100}
 101
 102static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
 103                                 struct hv_guest_state *hr)
 104{
 105        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 106
 107        hr->dpdes = vc->dpdes;
 108        hr->hfscr = vcpu->arch.hfscr;
 109        hr->purr = vcpu->arch.purr;
 110        hr->spurr = vcpu->arch.spurr;
 111        hr->ic = vcpu->arch.ic;
 112        hr->vtb = vc->vtb;
 113        hr->srr0 = vcpu->arch.shregs.srr0;
 114        hr->srr1 = vcpu->arch.shregs.srr1;
 115        hr->sprg[0] = vcpu->arch.shregs.sprg0;
 116        hr->sprg[1] = vcpu->arch.shregs.sprg1;
 117        hr->sprg[2] = vcpu->arch.shregs.sprg2;
 118        hr->sprg[3] = vcpu->arch.shregs.sprg3;
 119        hr->pidr = vcpu->arch.pid;
 120        hr->cfar = vcpu->arch.cfar;
 121        hr->ppr = vcpu->arch.ppr;
 122        switch (trap) {
 123        case BOOK3S_INTERRUPT_H_DATA_STORAGE:
 124                hr->hdar = vcpu->arch.fault_dar;
 125                hr->hdsisr = vcpu->arch.fault_dsisr;
 126                hr->asdr = vcpu->arch.fault_gpa;
 127                break;
 128        case BOOK3S_INTERRUPT_H_INST_STORAGE:
 129                hr->asdr = vcpu->arch.fault_gpa;
 130                break;
 131        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
 132                hr->heir = vcpu->arch.emul_inst;
 133                break;
 134        }
 135}
 136
 137/*
 138 * This can result in some L0 HV register state being leaked to an L1
 139 * hypervisor when the hv_guest_state is copied back to the guest after
 140 * being modified here.
 141 *
 142 * There is no known problem with such a leak, and in many cases these
 143 * register settings could be derived by the guest by observing behaviour
 144 * and timing, interrupts, etc., but it is an issue to consider.
 145 */
 146static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 147{
 148        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 149        u64 mask;
 150
 151        /*
 152         * Don't let L1 change LPCR bits for the L2 except these:
 153         */
 154        mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
 155                LPCR_LPES | LPCR_MER;
 156
 157        /*
 158         * Additional filtering is required depending on hardware
 159         * and configuration.
 160         */
 161        hr->lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
 162                        (vc->lpcr & ~mask) | (hr->lpcr & mask));
 163
 164        /*
 165         * Don't let L1 enable features for L2 which we've disabled for L1,
 166         * but preserve the interrupt cause field.
 167         */
 168        hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
 169
 170        /* Don't let data address watchpoint match in hypervisor state */
 171        hr->dawrx0 &= ~DAWRX_HYP;
 172        hr->dawrx1 &= ~DAWRX_HYP;
 173
 174        /* Don't let completed instruction address breakpt match in HV state */
 175        if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
 176                hr->ciabr &= ~CIABR_PRIV;
 177}
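     /*
      * Worked example (illustrative): if L0 has cleared HFSCR_TM in the
      * L1's HFSCR, then vcpu->arch.hfscr has HFSCR_TM clear and the mask
      * above clears HFSCR_TM in the L2's value as well - L1 cannot grant
      * the L2 a facility that it does not itself have.  Only the
      * interrupt-cause field (HFSCR_INTR_CAUSE) passes through unfiltered.
      */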
 178
 179static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
 180{
 181        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 182
 183        vc->pcr = hr->pcr | PCR_MASK;
 184        vc->dpdes = hr->dpdes;
 185        vcpu->arch.hfscr = hr->hfscr;
 186        vcpu->arch.dawr0 = hr->dawr0;
 187        vcpu->arch.dawrx0 = hr->dawrx0;
 188        vcpu->arch.ciabr = hr->ciabr;
 189        vcpu->arch.purr = hr->purr;
 190        vcpu->arch.spurr = hr->spurr;
 191        vcpu->arch.ic = hr->ic;
 192        vc->vtb = hr->vtb;
 193        vcpu->arch.shregs.srr0 = hr->srr0;
 194        vcpu->arch.shregs.srr1 = hr->srr1;
 195        vcpu->arch.shregs.sprg0 = hr->sprg[0];
 196        vcpu->arch.shregs.sprg1 = hr->sprg[1];
 197        vcpu->arch.shregs.sprg2 = hr->sprg[2];
 198        vcpu->arch.shregs.sprg3 = hr->sprg[3];
 199        vcpu->arch.pid = hr->pidr;
 200        vcpu->arch.cfar = hr->cfar;
 201        vcpu->arch.ppr = hr->ppr;
 202        vcpu->arch.dawr1 = hr->dawr1;
 203        vcpu->arch.dawrx1 = hr->dawrx1;
 204}
 205
 206void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
 207                                   struct hv_guest_state *hr)
 208{
 209        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 210
 211        vc->dpdes = hr->dpdes;
 212        vcpu->arch.hfscr = hr->hfscr;
 213        vcpu->arch.purr = hr->purr;
 214        vcpu->arch.spurr = hr->spurr;
 215        vcpu->arch.ic = hr->ic;
 216        vc->vtb = hr->vtb;
 217        vcpu->arch.fault_dar = hr->hdar;
 218        vcpu->arch.fault_dsisr = hr->hdsisr;
 219        vcpu->arch.fault_gpa = hr->asdr;
 220        vcpu->arch.emul_inst = hr->heir;
 221        vcpu->arch.shregs.srr0 = hr->srr0;
 222        vcpu->arch.shregs.srr1 = hr->srr1;
 223        vcpu->arch.shregs.sprg0 = hr->sprg[0];
 224        vcpu->arch.shregs.sprg1 = hr->sprg[1];
 225        vcpu->arch.shregs.sprg2 = hr->sprg[2];
 226        vcpu->arch.shregs.sprg3 = hr->sprg[3];
 227        vcpu->arch.pid = hr->pidr;
 228        vcpu->arch.cfar = hr->cfar;
 229        vcpu->arch.ppr = hr->ppr;
 230}
 231
 232static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
 233{
 234        /* No need to reflect the page fault to L1, we've handled it */
 235        vcpu->arch.trap = 0;
 236
 237        /*
 238         * Since the L2 gprs have already been written back into L1 memory when
 239         * we complete the mmio, store the L1 memory location of the L2 gpr
 240         * being loaded into by the mmio so that the loaded value can be
 241         * written there in kvmppc_complete_mmio_load()
 242         */
 243        if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
 244            && (vcpu->mmio_is_write == 0)) {
 245                vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
 246                                           offsetof(struct pt_regs,
 247                                                    gpr[vcpu->arch.io_gpr]);
 248                vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
 249        }
 250}
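     /*
      * Illustration of the address computed above: for a load into L2 r5,
      * io_gpr == 5 and nested_io_gpr becomes the L1 guest physical address
      * regs_ptr + offsetof(struct pt_regs, gpr[5]), so that
      * kvmppc_complete_mmio_load() can store the loaded value straight
      * into the pt_regs image that L1 passed to H_ENTER_NESTED.
      */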
 251
 252static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
 253                                           struct hv_guest_state *l2_hv,
 254                                           struct pt_regs *l2_regs,
 255                                           u64 hv_ptr, u64 regs_ptr)
 256{
 257        int size;
 258
 259        if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
 260                                sizeof(l2_hv->version)))
 261                return -1;
 262
 263        if (kvmppc_need_byteswap(vcpu))
 264                l2_hv->version = swab64(l2_hv->version);
 265
 266        size = hv_guest_state_size(l2_hv->version);
 267        if (size < 0)
 268                return -1;
 269
 270        return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
 271                kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
 272                                    sizeof(struct pt_regs));
 273}
 274
 275static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
 276                                            struct hv_guest_state *l2_hv,
 277                                            struct pt_regs *l2_regs,
 278                                            u64 hv_ptr, u64 regs_ptr)
 279{
 280        int size;
 281
 282        size = hv_guest_state_size(l2_hv->version);
 283        if (size < 0)
 284                return -1;
 285
 286        return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
 287                kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
 288                                     sizeof(struct pt_regs));
 289}
 290
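     /*
      * Handle the H_ENTER_NESTED hcall.  In summary:
      * r4 = L1 guest real address of a struct hv_guest_state
      * r5 = L1 guest real address of a struct pt_regs
      * Both are read in and sanitised, the L2 vcpu they describe is run
      * until it exits, and the updated state is then written back to L1.
      */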
 291long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 292{
 293        long int err, r;
 294        struct kvm_nested_guest *l2;
 295        struct pt_regs l2_regs, saved_l1_regs;
 296        struct hv_guest_state l2_hv = {0}, saved_l1_hv;
 297        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 298        u64 hv_ptr, regs_ptr;
 299        u64 hdec_exp;
 300        s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
 301
 302        if (vcpu->kvm->arch.l1_ptcr == 0)
 303                return H_NOT_AVAILABLE;
 304
 305        if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
 306                return H_BAD_MODE;
 307
 308        /* copy parameters in */
 309        hv_ptr = kvmppc_get_gpr(vcpu, 4);
 310        regs_ptr = kvmppc_get_gpr(vcpu, 5);
 311        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 312        err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
 313                                              hv_ptr, regs_ptr);
 314        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 315        if (err)
 316                return H_PARAMETER;
 317
 318        if (kvmppc_need_byteswap(vcpu))
 319                byteswap_hv_regs(&l2_hv);
 320        if (l2_hv.version > HV_GUEST_STATE_VERSION)
 321                return H_P2;
 322
 323        if (kvmppc_need_byteswap(vcpu))
 324                byteswap_pt_regs(&l2_regs);
 325        if (l2_hv.vcpu_token >= NR_CPUS)
 326                return H_PARAMETER;
 327
 328        /*
 329         * L1 must have set up a suspended state to enter the L2 in a
 330         * transactional state, and only in that case. These have to be
 331         * filtered out here to prevent causing a TM Bad Thing in the
 332         * host HRFID. We could synthesize a TM Bad Thing back to the L1
 333         * here but there doesn't seem like much point.
 334         */
 335        if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
 336                if (!MSR_TM_ACTIVE(l2_regs.msr))
 337                        return H_BAD_MODE;
 338        } else {
 339                if (l2_regs.msr & MSR_TS_MASK)
 340                        return H_BAD_MODE;
 341                if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
 342                        return H_BAD_MODE;
 343        }
 344
 345        /* translate lpid */
 346        l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
 347        if (!l2)
 348                return H_PARAMETER;
 349        if (!l2->l1_gr_to_hr) {
 350                mutex_lock(&l2->tlb_lock);
 351                kvmhv_update_ptbl_cache(l2);
 352                mutex_unlock(&l2->tlb_lock);
 353        }
 354
 355        /* save l1 values of things */
 356        vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
 357        saved_l1_regs = vcpu->arch.regs;
 358        kvmhv_save_hv_regs(vcpu, &saved_l1_hv);
 359
 360        /* convert TB values/offsets to host (L0) values */
 361        hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
 362        vc->tb_offset += l2_hv.tb_offset;
 363
 364        /* set L1 state to L2 state */
 365        vcpu->arch.nested = l2;
 366        vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
 367        vcpu->arch.regs = l2_regs;
 368
 369        /* Guest must always run with ME enabled, HV disabled. */
 370        vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;
 371
 372        sanitise_hv_regs(vcpu, &l2_hv);
 373        restore_hv_regs(vcpu, &l2_hv);
 374
 375        vcpu->arch.ret = RESUME_GUEST;
 376        vcpu->arch.trap = 0;
 377        do {
 378                if (mftb() >= hdec_exp) {
 379                        vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
 380                        r = RESUME_HOST;
 381                        break;
 382                }
 383                r = kvmhv_run_single_vcpu(vcpu, hdec_exp, l2_hv.lpcr);
 384        } while (is_kvmppc_resume_guest(r));
 385
 386        /* save L2 state for return */
 387        l2_regs = vcpu->arch.regs;
 388        l2_regs.msr = vcpu->arch.shregs.msr;
 389        delta_purr = vcpu->arch.purr - l2_hv.purr;
 390        delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
 391        delta_ic = vcpu->arch.ic - l2_hv.ic;
 392        delta_vtb = vc->vtb - l2_hv.vtb;
 393        save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
 394
 395        /* restore L1 state */
 396        vcpu->arch.nested = NULL;
 397        vcpu->arch.regs = saved_l1_regs;
 398        vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
 399        /* set L1 MSR TS field according to L2 transaction state */
 400        if (l2_regs.msr & MSR_TS_MASK)
 401                vcpu->arch.shregs.msr |= MSR_TS_S;
 402        vc->tb_offset = saved_l1_hv.tb_offset;
 403        restore_hv_regs(vcpu, &saved_l1_hv);
 404        vcpu->arch.purr += delta_purr;
 405        vcpu->arch.spurr += delta_spurr;
 406        vcpu->arch.ic += delta_ic;
 407        vc->vtb += delta_vtb;
 408
 409        kvmhv_put_nested(l2);
 410
 411        /* copy l2_hv_state and regs back to guest */
 412        if (kvmppc_need_byteswap(vcpu)) {
 413                byteswap_hv_regs(&l2_hv);
 414                byteswap_pt_regs(&l2_regs);
 415        }
 416        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 417        err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
 418                                               hv_ptr, regs_ptr);
 419        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 420        if (err)
 421                return H_AUTHORITY;
 422
 423        if (r == -EINTR)
 424                return H_INTERRUPT;
 425
 426        if (vcpu->mmio_needed) {
 427                kvmhv_nested_mmio_needed(vcpu, regs_ptr);
 428                return H_TOO_HARD;
 429        }
 430
 431        return vcpu->arch.trap;
 432}
 433
 434long kvmhv_nested_init(void)
 435{
 436        long int ptb_order;
 437        unsigned long ptcr;
 438        long rc;
 439
 440        if (!kvmhv_on_pseries())
 441                return 0;
 442        if (!radix_enabled())
 443                return -ENODEV;
 444
 445        /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
 446        ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
 447        if (ptb_order < 8)
 448                ptb_order = 8;
 449        pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
 450                                       GFP_KERNEL);
 451        if (!pseries_partition_tb) {
  452                pr_err("kvm-hv: failed to allocate nested partition table\n");
 453                return -ENOMEM;
 454        }
 455
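             /*
              * Each struct patb_entry is 16 bytes (2^4), so the table
              * occupies 2^(ptb_order + 4) bytes and the PTCR size field,
              * log2(table size in bytes) - 12, works out to ptb_order - 8.
              * The minimum ptb_order of 8 above matches the architected
              * 4kB minimum table size.
              */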
 456        ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
 457        rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
 458        if (rc != H_SUCCESS) {
 459                pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
 460                       rc);
 461                kfree(pseries_partition_tb);
 462                pseries_partition_tb = NULL;
 463                return -ENODEV;
 464        }
 465
 466        return 0;
 467}
 468
 469void kvmhv_nested_exit(void)
 470{
 471        /*
 472         * N.B. the kvmhv_on_pseries() test is there because it enables
 473         * the compiler to remove the call to plpar_hcall_norets()
 474         * when CONFIG_PPC_PSERIES=n.
 475         */
 476        if (kvmhv_on_pseries() && pseries_partition_tb) {
 477                plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
 478                kfree(pseries_partition_tb);
 479                pseries_partition_tb = NULL;
 480        }
 481}
 482
 483static void kvmhv_flush_lpid(unsigned int lpid)
 484{
 485        long rc;
 486
 487        if (!kvmhv_on_pseries()) {
 488                radix__flush_all_lpid(lpid);
 489                return;
 490        }
 491
 492        if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
 493                rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
 494                                        lpid, TLBIEL_INVAL_SET_LPID);
 495        else
 496                rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
 497                                            H_RPTI_TYPE_NESTED |
 498                                            H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
 499                                            H_RPTI_TYPE_PAT,
 500                                            H_RPTI_PAGE_ALL, 0, -1UL);
 501        if (rc)
 502                pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
 503}
 504
 505void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
 506{
 507        if (!kvmhv_on_pseries()) {
 508                mmu_partition_table_set_entry(lpid, dw0, dw1, true);
 509                return;
 510        }
 511
 512        pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
 513        pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
 514        /* L0 will do the necessary barriers */
 515        kvmhv_flush_lpid(lpid);
 516}
 517
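     /*
      * For a radix guest the two doublewords of its partition-table entry
      * are: dw0 = host-radix bit, radix tree size, shadow page-table root
      * and root index size; dw1 = guest real address and size of the
      * guest's process table, as assembled below.
      */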
 518static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
 519{
 520        unsigned long dw0;
 521
 522        dw0 = PATB_HR | radix__get_tree_size() |
 523                __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
 524        kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
 525}
 526
 527void kvmhv_vm_nested_init(struct kvm *kvm)
 528{
 529        kvm->arch.max_nested_lpid = -1;
 530}
 531
 532/*
 533 * Handle the H_SET_PARTITION_TABLE hcall.
 534 * r4 = guest real address of partition table + log_2(size) - 12
 535 * (formatted as for the PTCR).
 536 */
 537long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
 538{
 539        struct kvm *kvm = vcpu->kvm;
 540        unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
 541        int srcu_idx;
 542        long ret = H_SUCCESS;
 543
 544        srcu_idx = srcu_read_lock(&kvm->srcu);
 545        /*
 546         * Limit the partition table to 4096 entries (because that's what
 547         * hardware supports), and check the base address.
 548         */
 549        if ((ptcr & PRTS_MASK) > 12 - 8 ||
 550            !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
 551                ret = H_PARAMETER;
 552        srcu_read_unlock(&kvm->srcu, srcu_idx);
 553        if (ret == H_SUCCESS)
 554                kvm->arch.l1_ptcr = ptcr;
 555        return ret;
 556}
 557
 558/*
 559 * Handle the H_COPY_TOFROM_GUEST hcall.
 560 * r4 = L1 lpid of nested guest
 561 * r5 = pid
 562 * r6 = eaddr to access
 563 * r7 = to buffer (L1 gpa)
 564 * r8 = from buffer (L1 gpa)
 565 * r9 = n bytes to copy
 566 */
 567long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
 568{
 569        struct kvm_nested_guest *gp;
 570        int l1_lpid = kvmppc_get_gpr(vcpu, 4);
 571        int pid = kvmppc_get_gpr(vcpu, 5);
 572        gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
 573        gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
 574        gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
 575        void *buf;
 576        unsigned long n = kvmppc_get_gpr(vcpu, 9);
 577        bool is_load = !!gp_to;
 578        long rc;
 579
 580        if (gp_to && gp_from) /* One must be NULL to determine the direction */
 581                return H_PARAMETER;
 582
 583        if (eaddr & (0xFFFUL << 52))
 584                return H_PARAMETER;
 585
 586        buf = kzalloc(n, GFP_KERNEL);
 587        if (!buf)
 588                return H_NO_MEM;
 589
 590        gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
 591        if (!gp) {
 592                rc = H_PARAMETER;
 593                goto out_free;
 594        }
 595
 596        mutex_lock(&gp->tlb_lock);
 597
 598        if (is_load) {
 599                /* Load from the nested guest into our buffer */
 600                rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
 601                                                     eaddr, buf, NULL, n);
 602                if (rc)
 603                        goto not_found;
 604
 605                /* Write what was loaded into our buffer back to the L1 guest */
 606                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 607                rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
 608                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 609                if (rc)
 610                        goto not_found;
 611        } else {
 612                /* Load the data to be stored from the L1 guest into our buf */
 613                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 614                rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
 615                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 616                if (rc)
 617                        goto not_found;
 618
 619                /* Store from our buffer into the nested guest */
 620                rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
 621                                                     eaddr, NULL, buf, n);
 622                if (rc)
 623                        goto not_found;
 624        }
 625
 626out_unlock:
 627        mutex_unlock(&gp->tlb_lock);
 628        kvmhv_put_nested(gp);
 629out_free:
 630        kfree(buf);
 631        return rc;
 632not_found:
 633        rc = H_NOT_FOUND;
 634        goto out_unlock;
 635}
 636
 637/*
 638 * Reload the partition table entry for a guest.
 639 * Caller must hold gp->tlb_lock.
 640 */
 641static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
 642{
 643        int ret;
 644        struct patb_entry ptbl_entry;
 645        unsigned long ptbl_addr;
 646        struct kvm *kvm = gp->l1_host;
 647
 648        ret = -EFAULT;
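             /*
              * Partition-table entries are 16 bytes, hence the lpid << 4
              * offset below; and since the PRTS field encodes
              * log2(table size in bytes) - 12, the table holds
              * 2^(PRTS + 8) entries, which bounds the lpid check.
              */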
 649        ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
 650        if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
 651                int srcu_idx = srcu_read_lock(&kvm->srcu);
 652                ret = kvm_read_guest(kvm, ptbl_addr,
 653                                     &ptbl_entry, sizeof(ptbl_entry));
 654                srcu_read_unlock(&kvm->srcu, srcu_idx);
 655        }
 656        if (ret) {
 657                gp->l1_gr_to_hr = 0;
 658                gp->process_table = 0;
 659        } else {
 660                gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
 661                gp->process_table = be64_to_cpu(ptbl_entry.patb1);
 662        }
 663        kvmhv_set_nested_ptbl(gp);
 664}
 665
 666static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
 667{
 668        struct kvm_nested_guest *gp;
 669        long shadow_lpid;
 670
 671        gp = kzalloc(sizeof(*gp), GFP_KERNEL);
 672        if (!gp)
 673                return NULL;
 674        gp->l1_host = kvm;
 675        gp->l1_lpid = lpid;
 676        mutex_init(&gp->tlb_lock);
 677        gp->shadow_pgtable = pgd_alloc(kvm->mm);
 678        if (!gp->shadow_pgtable)
 679                goto out_free;
 680        shadow_lpid = kvmppc_alloc_lpid();
 681        if (shadow_lpid < 0)
 682                goto out_free2;
 683        gp->shadow_lpid = shadow_lpid;
 684        gp->radix = 1;
 685
 686        memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
 687
 688        return gp;
 689
 690 out_free2:
 691        pgd_free(kvm->mm, gp->shadow_pgtable);
 692 out_free:
 693        kfree(gp);
 694        return NULL;
 695}
 696
 697/*
 698 * Free up any resources allocated for a nested guest.
 699 */
 700static void kvmhv_release_nested(struct kvm_nested_guest *gp)
 701{
 702        struct kvm *kvm = gp->l1_host;
 703
 704        if (gp->shadow_pgtable) {
 705                /*
 706                 * No vcpu is using this struct and no call to
 707                 * kvmhv_get_nested can find this struct,
 708                 * so we don't need to hold kvm->mmu_lock.
 709                 */
 710                kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
 711                                          gp->shadow_lpid);
 712                pgd_free(kvm->mm, gp->shadow_pgtable);
 713        }
 714        kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
 715        kvmppc_free_lpid(gp->shadow_lpid);
 716        kfree(gp);
 717}
 718
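     /*
      * Drop the reference that the nested_guests[] table holds on gp and
      * take it out of the table; the structure itself is only freed once
      * any remaining temporary references from kvmhv_get_nested() have
      * also been dropped.
      */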
 719static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
 720{
 721        struct kvm *kvm = gp->l1_host;
 722        int lpid = gp->l1_lpid;
 723        long ref;
 724
 725        spin_lock(&kvm->mmu_lock);
 726        if (gp == kvm->arch.nested_guests[lpid]) {
 727                kvm->arch.nested_guests[lpid] = NULL;
 728                if (lpid == kvm->arch.max_nested_lpid) {
 729                        while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
 730                                ;
 731                        kvm->arch.max_nested_lpid = lpid;
 732                }
 733                --gp->refcnt;
 734        }
 735        ref = gp->refcnt;
 736        spin_unlock(&kvm->mmu_lock);
 737        if (ref == 0)
 738                kvmhv_release_nested(gp);
 739}
 740
 741/*
 742 * Free up all nested resources allocated for this guest.
 743 * This is called with no vcpus of the guest running, when
 744 * switching the guest to HPT mode or when destroying the
 745 * guest.
 746 */
 747void kvmhv_release_all_nested(struct kvm *kvm)
 748{
 749        int i;
 750        struct kvm_nested_guest *gp;
 751        struct kvm_nested_guest *freelist = NULL;
 752        struct kvm_memory_slot *memslot;
 753        int srcu_idx;
 754
 755        spin_lock(&kvm->mmu_lock);
 756        for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
 757                gp = kvm->arch.nested_guests[i];
 758                if (!gp)
 759                        continue;
 760                kvm->arch.nested_guests[i] = NULL;
 761                if (--gp->refcnt == 0) {
 762                        gp->next = freelist;
 763                        freelist = gp;
 764                }
 765        }
 766        kvm->arch.max_nested_lpid = -1;
 767        spin_unlock(&kvm->mmu_lock);
 768        while ((gp = freelist) != NULL) {
 769                freelist = gp->next;
 770                kvmhv_release_nested(gp);
 771        }
 772
 773        srcu_idx = srcu_read_lock(&kvm->srcu);
 774        kvm_for_each_memslot(memslot, kvm_memslots(kvm))
 775                kvmhv_free_memslot_nest_rmap(memslot);
 776        srcu_read_unlock(&kvm->srcu, srcu_idx);
 777}
 778
 779/* caller must hold gp->tlb_lock */
 780static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
 781{
 782        struct kvm *kvm = gp->l1_host;
 783
 784        spin_lock(&kvm->mmu_lock);
 785        kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
 786        spin_unlock(&kvm->mmu_lock);
 787        kvmhv_flush_lpid(gp->shadow_lpid);
 788        kvmhv_update_ptbl_cache(gp);
 789        if (gp->l1_gr_to_hr == 0)
 790                kvmhv_remove_nested(gp);
 791}
 792
 793struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
 794                                          bool create)
 795{
 796        struct kvm_nested_guest *gp, *newgp;
 797
 798        if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
 799            l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
 800                return NULL;
 801
 802        spin_lock(&kvm->mmu_lock);
 803        gp = kvm->arch.nested_guests[l1_lpid];
 804        if (gp)
 805                ++gp->refcnt;
 806        spin_unlock(&kvm->mmu_lock);
 807
 808        if (gp || !create)
 809                return gp;
 810
 811        newgp = kvmhv_alloc_nested(kvm, l1_lpid);
 812        if (!newgp)
 813                return NULL;
 814        spin_lock(&kvm->mmu_lock);
 815        if (kvm->arch.nested_guests[l1_lpid]) {
 816                /* someone else beat us to it */
 817                gp = kvm->arch.nested_guests[l1_lpid];
 818        } else {
 819                kvm->arch.nested_guests[l1_lpid] = newgp;
 820                ++newgp->refcnt;
 821                gp = newgp;
 822                newgp = NULL;
 823                if (l1_lpid > kvm->arch.max_nested_lpid)
 824                        kvm->arch.max_nested_lpid = l1_lpid;
 825        }
 826        ++gp->refcnt;
 827        spin_unlock(&kvm->mmu_lock);
 828
 829        if (newgp)
 830                kvmhv_release_nested(newgp);
 831
 832        return gp;
 833}
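     /*
      * Usage sketch (illustrative, mirroring callers in this file such as
      * kvmhv_emulate_tlbie_tlb_addr()): take a temporary reference, work
      * on the guest under its tlb_lock, then drop the reference:
      *
      *	gp = kvmhv_get_nested(kvm, l1_lpid, false);
      *	if (gp) {
      *		mutex_lock(&gp->tlb_lock);
      *		...
      *		mutex_unlock(&gp->tlb_lock);
      *		kvmhv_put_nested(gp);
      *	}
      */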
 834
 835void kvmhv_put_nested(struct kvm_nested_guest *gp)
 836{
 837        struct kvm *kvm = gp->l1_host;
 838        long ref;
 839
 840        spin_lock(&kvm->mmu_lock);
 841        ref = --gp->refcnt;
 842        spin_unlock(&kvm->mmu_lock);
 843        if (ref == 0)
 844                kvmhv_release_nested(gp);
 845}
 846
 847static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
 848{
 849        if (lpid > kvm->arch.max_nested_lpid)
 850                return NULL;
 851        return kvm->arch.nested_guests[lpid];
 852}
 853
 854pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
 855                                 unsigned long ea, unsigned *hshift)
 856{
 857        struct kvm_nested_guest *gp;
 858        pte_t *pte;
 859
 860        gp = kvmhv_find_nested(kvm, lpid);
 861        if (!gp)
 862                return NULL;
 863
 864        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
  865                "%s called with kvm mmu_lock not held\n", __func__);
 866        pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
 867
 868        return pte;
 869}
 870
 871static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
 872{
 873        return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
 874                                       RMAP_NESTED_GPA_MASK));
 875}
 876
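     /*
      * A memslot rmap slot encodes either a single rmap value directly
      * (tagged with RMAP_NESTED_IS_SINGLE_ENTRY), or the head of an llist
      * of struct rmap_nested once a second nested mapping appears for the
      * same gfn; the insert path below converts between the two forms.
      */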
 877void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
 878                            struct rmap_nested **n_rmap)
 879{
 880        struct llist_node *entry = ((struct llist_head *) rmapp)->first;
 881        struct rmap_nested *cursor;
 882        u64 rmap, new_rmap = (*n_rmap)->rmap;
 883
 884        /* Are there any existing entries? */
 885        if (!(*rmapp)) {
 886                /* No -> use the rmap as a single entry */
 887                *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
 888                return;
 889        }
 890
 891        /* Do any entries match what we're trying to insert? */
 892        for_each_nest_rmap_safe(cursor, entry, &rmap) {
 893                if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
 894                        return;
 895        }
 896
 897        /* Do we need to create a list or just add the new entry? */
 898        rmap = *rmapp;
 899        if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
 900                *rmapp = 0UL;
 901        llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
 902        if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
 903                (*n_rmap)->list.next = (struct llist_node *) rmap;
 904
 905        /* Set NULL so not freed by caller */
 906        *n_rmap = NULL;
 907}
 908
 909static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 910                                      unsigned long clr, unsigned long set,
 911                                      unsigned long hpa, unsigned long mask)
 912{
 913        unsigned long gpa;
 914        unsigned int shift, lpid;
 915        pte_t *ptep;
 916
 917        gpa = n_rmap & RMAP_NESTED_GPA_MASK;
 918        lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 919
 920        /* Find the pte */
 921        ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
 922        /*
 923         * If the pte is present and the pfn is still the same, update the pte.
 924         * If the pfn has changed then this is a stale rmap entry, the nested
 925         * gpa actually points somewhere else now, and there is nothing to do.
 926         * XXX A future optimisation would be to remove the rmap entry here.
 927         */
 928        if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
 929                __radix_pte_update(ptep, clr, set);
 930                kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
 931        }
 932}
 933
 934/*
 935 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 936 * page tables for nested guests which are referenced by the rmap list.
 937 */
 938void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
 939                                    unsigned long clr, unsigned long set,
 940                                    unsigned long hpa, unsigned long nbytes)
 941{
 942        struct llist_node *entry = ((struct llist_head *) rmapp)->first;
 943        struct rmap_nested *cursor;
 944        unsigned long rmap, mask;
 945
 946        if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
 947                return;
 948
 949        mask = PTE_RPN_MASK & ~(nbytes - 1);
 950        hpa &= mask;
 951
 952        for_each_nest_rmap_safe(cursor, entry, &rmap)
 953                kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
 954}
 955
 956static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 957                                   unsigned long hpa, unsigned long mask)
 958{
 959        struct kvm_nested_guest *gp;
 960        unsigned long gpa;
 961        unsigned int shift, lpid;
 962        pte_t *ptep;
 963
 964        gpa = n_rmap & RMAP_NESTED_GPA_MASK;
 965        lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 966        gp = kvmhv_find_nested(kvm, lpid);
 967        if (!gp)
 968                return;
 969
 970        /* Find and invalidate the pte */
 971        ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
 972        /* Don't spuriously invalidate ptes if the pfn has changed */
 973        if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
 974                kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
 975}
 976
 977static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
 978                                        unsigned long hpa, unsigned long mask)
 979{
 980        struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
 981        struct rmap_nested *cursor;
 982        unsigned long rmap;
 983
 984        for_each_nest_rmap_safe(cursor, entry, &rmap) {
 985                kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
 986                kfree(cursor);
 987        }
 988}
 989
 990/* called with kvm->mmu_lock held */
 991void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
 992                                  const struct kvm_memory_slot *memslot,
 993                                  unsigned long gpa, unsigned long hpa,
 994                                  unsigned long nbytes)
 995{
 996        unsigned long gfn, end_gfn;
 997        unsigned long addr_mask;
 998
 999        if (!memslot)
1000                return;
1001        gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
1002        end_gfn = gfn + (nbytes >> PAGE_SHIFT);
1003
1004        addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
1005        hpa &= addr_mask;
1006
1007        for (; gfn < end_gfn; gfn++) {
1008                unsigned long *rmap = &memslot->arch.rmap[gfn];
1009                kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
1010        }
1011}
1012
1013static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
1014{
1015        unsigned long page;
1016
1017        for (page = 0; page < free->npages; page++) {
1018                unsigned long rmap, *rmapp = &free->arch.rmap[page];
1019                struct rmap_nested *cursor;
1020                struct llist_node *entry;
1021
1022                entry = llist_del_all((struct llist_head *) rmapp);
1023                for_each_nest_rmap_safe(cursor, entry, &rmap)
1024                        kfree(cursor);
1025        }
1026}
1027
1028static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
1029                                        struct kvm_nested_guest *gp,
1030                                        long gpa, int *shift_ret)
1031{
1032        struct kvm *kvm = vcpu->kvm;
1033        bool ret = false;
1034        pte_t *ptep;
1035        int shift;
1036
1037        spin_lock(&kvm->mmu_lock);
1038        ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
1039        if (!shift)
1040                shift = PAGE_SHIFT;
1041        if (ptep && pte_present(*ptep)) {
1042                kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
1043                ret = true;
1044        }
1045        spin_unlock(&kvm->mmu_lock);
1046
1047        if (shift_ret)
1048                *shift_ret = shift;
1049        return ret;
1050}
1051
1052static inline int get_ric(unsigned int instr)
1053{
1054        return (instr >> 18) & 0x3;
1055}
1056
1057static inline int get_prs(unsigned int instr)
1058{
1059        return (instr >> 17) & 0x1;
1060}
1061
1062static inline int get_r(unsigned int instr)
1063{
1064        return (instr >> 16) & 0x1;
1065}
1066
1067static inline int get_lpid(unsigned long r_val)
1068{
1069        return r_val & 0xffffffff;
1070}
1071
1072static inline int get_is(unsigned long r_val)
1073{
1074        return (r_val >> 10) & 0x3;
1075}
1076
1077static inline int get_ap(unsigned long r_val)
1078{
1079        return (r_val >> 5) & 0x7;
1080}
1081
1082static inline long get_epn(unsigned long r_val)
1083{
1084        return r_val >> 12;
1085}
1086
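     /*
      * The helpers above decode a tlbie instruction and its operands: RIC,
      * PRS and R are pulled from the instruction image, the LPID from the
      * low 32 bits of the RS value, and IS, AP and the effective page
      * number from the RB value.
      */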
1087static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
1088                                        int ap, long epn)
1089{
1090        struct kvm *kvm = vcpu->kvm;
1091        struct kvm_nested_guest *gp;
1092        long npages;
1093        int shift, shadow_shift;
1094        unsigned long addr;
1095
1096        shift = ap_to_shift(ap);
1097        addr = epn << 12;
1098        if (shift < 0)
1099                /* Invalid ap encoding */
1100                return -EINVAL;
1101
1102        addr &= ~((1UL << shift) - 1);
1103        npages = 1UL << (shift - PAGE_SHIFT);
1104
1105        gp = kvmhv_get_nested(kvm, lpid, false);
1106        if (!gp) /* No such guest -> nothing to do */
1107                return 0;
1108        mutex_lock(&gp->tlb_lock);
1109
1110        /* There may be more than one host page backing this single guest pte */
1111        do {
1112                kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
1113
1114                npages -= 1UL << (shadow_shift - PAGE_SHIFT);
1115                addr += 1UL << shadow_shift;
1116        } while (npages > 0);
1117
1118        mutex_unlock(&gp->tlb_lock);
1119        kvmhv_put_nested(gp);
1120        return 0;
1121}
1122
1123static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
1124                                     struct kvm_nested_guest *gp, int ric)
1125{
1126        struct kvm *kvm = vcpu->kvm;
1127
1128        mutex_lock(&gp->tlb_lock);
1129        switch (ric) {
1130        case 0:
1131                /* Invalidate TLB */
1132                spin_lock(&kvm->mmu_lock);
1133                kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
1134                                          gp->shadow_lpid);
1135                kvmhv_flush_lpid(gp->shadow_lpid);
1136                spin_unlock(&kvm->mmu_lock);
1137                break;
1138        case 1:
1139                /*
1140                 * Invalidate PWC
1141                 * We don't cache this -> nothing to do
1142                 */
1143                break;
1144        case 2:
1145                /* Invalidate TLB, PWC and caching of partition table entries */
1146                kvmhv_flush_nested(gp);
1147                break;
1148        default:
1149                break;
1150        }
1151        mutex_unlock(&gp->tlb_lock);
1152}
1153
1154static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
1155{
1156        struct kvm *kvm = vcpu->kvm;
1157        struct kvm_nested_guest *gp;
1158        int i;
1159
1160        spin_lock(&kvm->mmu_lock);
1161        for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
1162                gp = kvm->arch.nested_guests[i];
1163                if (gp) {
1164                        spin_unlock(&kvm->mmu_lock);
1165                        kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1166                        spin_lock(&kvm->mmu_lock);
1167                }
1168        }
1169        spin_unlock(&kvm->mmu_lock);
1170}
1171
1172static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
1173                                    unsigned long rsval, unsigned long rbval)
1174{
1175        struct kvm *kvm = vcpu->kvm;
1176        struct kvm_nested_guest *gp;
1177        int r, ric, prs, is, ap;
1178        int lpid;
1179        long epn;
1180        int ret = 0;
1181
1182        ric = get_ric(instr);
1183        prs = get_prs(instr);
1184        r = get_r(instr);
1185        lpid = get_lpid(rsval);
1186        is = get_is(rbval);
1187
1188        /*
1189         * These cases are invalid and are not handled:
1190         * r   != 1 -> Only radix supported
1191         * prs == 1 -> Not HV privileged
1192         * ric == 3 -> No cluster bombs for radix
1193         * is  == 1 -> Partition scoped translations not associated with pid
1194         * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
1195         */
1196        if ((!r) || (prs) || (ric == 3) || (is == 1) ||
1197            ((!is) && (ric == 1 || ric == 2)))
1198                return -EINVAL;
1199
1200        switch (is) {
1201        case 0:
1202                /*
1203                 * We know ric == 0
1204                 * Invalidate TLB for a given target address
1205                 */
1206                epn = get_epn(rbval);
1207                ap = get_ap(rbval);
1208                ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
1209                break;
1210        case 2:
1211                /* Invalidate matching LPID */
1212                gp = kvmhv_get_nested(kvm, lpid, false);
1213                if (gp) {
1214                        kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1215                        kvmhv_put_nested(gp);
1216                }
1217                break;
1218        case 3:
1219                /* Invalidate ALL LPIDs */
1220                kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
1221                break;
1222        default:
1223                ret = -EINVAL;
1224                break;
1225        }
1226
1227        return ret;
1228}
1229
1230/*
1231 * This handles the H_TLB_INVALIDATE hcall.
1232 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
1233 * (r6) rB contents.
1234 */
1235long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
1236{
1237        int ret;
1238
1239        ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
1240                        kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
1241        if (ret)
1242                return H_PARAMETER;
1243        return H_SUCCESS;
1244}
1245
1246static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
1247                                         unsigned long lpid, unsigned long ric)
1248{
1249        struct kvm *kvm = vcpu->kvm;
1250        struct kvm_nested_guest *gp;
1251
1252        gp = kvmhv_get_nested(kvm, lpid, false);
1253        if (gp) {
1254                kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1255                kvmhv_put_nested(gp);
1256        }
1257        return H_SUCCESS;
1258}
1259
1260/*
1261 * Number of pages above which we invalidate the entire LPID rather than
1262 * flush individual pages.
1263 */
1264static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
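     /*
      * Example (illustrative): invalidating a 4MB range of 64kB pages
      * covers 64 pages, which exceeds this ceiling, so the whole LPID is
      * flushed rather than issuing 64 single-page invalidations.
      */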
1265
1266static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
1267                                         unsigned long lpid,
1268                                         unsigned long pg_sizes,
1269                                         unsigned long start,
1270                                         unsigned long end)
1271{
1272        int ret = H_P4;
1273        unsigned long addr, nr_pages;
1274        struct mmu_psize_def *def;
1275        unsigned long psize, ap, page_size;
1276        bool flush_lpid;
1277
1278        for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
1279                def = &mmu_psize_defs[psize];
1280                if (!(pg_sizes & def->h_rpt_pgsize))
1281                        continue;
1282
1283                nr_pages = (end - start) >> def->shift;
1284                flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
1285                if (flush_lpid)
1286                        return do_tlb_invalidate_nested_all(vcpu, lpid,
1287                                                        RIC_FLUSH_TLB);
1288                addr = start;
1289                ap = mmu_get_ap(psize);
1290                page_size = 1UL << def->shift;
1291                do {
1292                        ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
1293                                                   get_epn(addr));
1294                        if (ret)
1295                                return H_P4;
1296                        addr += page_size;
1297                } while (addr < end);
1298        }
1299        return ret;
1300}
1301
1302/*
1303 * Performs partition-scoped invalidations for nested guests
1304 * as part of H_RPT_INVALIDATE hcall.
1305 */
1306long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
1307                             unsigned long type, unsigned long pg_sizes,
1308                             unsigned long start, unsigned long end)
1309{
 1310        /*
 1311         * If the L2 lpid isn't valid, we need to return H_PARAMETER.
 1312         *
 1313         * However, nested KVM issues an L2 lpid flush call when creating
 1314         * partition table entries for the L2. This happens even before the
 1315         * corresponding shadow lpid is created in the HV, which happens in
 1316         * the H_ENTER_NESTED call. Since we can't differentiate this case from
 1317         * the invalid case, we ignore such flush requests and return success.
 1318         */
1319        if (!kvmhv_find_nested(vcpu->kvm, lpid))
1320                return H_SUCCESS;
1321
1322        /*
1323         * A flush all request can be handled by a full lpid flush only.
1324         */
1325        if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
1326                return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
1327
 1328        /*
 1329         * We don't need to handle a PWC flush like the process table here,
 1330         * because the intermediate partition-scoped tables in a nested guest
 1331         * don't really have a PWC. The only level with a PWC is L0, and for a
 1332         * nested invalidate at L0 we always do kvmhv_flush_lpid(), which does
 1333         * radix__flush_all_lpid(). For a range invalidate at any level, we
 1334         * are not removing the higher-level page tables and hence there is
 1335         * no PWC invalidate needed.
 1336         *
 1337         * if (type & H_RPTI_TYPE_PWC) {
 1338         *      ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
 1339         *      if (ret)
 1340         *              return H_P4;
 1341         * }
 1342         */
1343
1344        if (start == 0 && end == -1)
1345                return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
1346
1347        if (type & H_RPTI_TYPE_TLB)
1348                return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
1349                                                    start, end);
1350        return H_SUCCESS;
1351}
1352
1353/* Used to convert a nested guest real address to a L1 guest real address */
1354static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
1355                                       struct kvm_nested_guest *gp,
1356                                       unsigned long n_gpa, unsigned long dsisr,
1357                                       struct kvmppc_pte *gpte_p)
1358{
1359        u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
1360        int ret;
1361
1362        ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
1363                                         &fault_addr);
1364
1365        if (ret) {
1366                /* We didn't find a pte */
1367                if (ret == -EINVAL) {
1368                        /* Unsupported mmu config */
1369                        flags |= DSISR_UNSUPP_MMU;
1370                } else if (ret == -ENOENT) {
1371                        /* No translation found */
1372                        flags |= DSISR_NOHPTE;
1373                } else if (ret == -EFAULT) {
1374                        /* Couldn't access L1 real address */
1375                        flags |= DSISR_PRTABLE_FAULT;
1376                        vcpu->arch.fault_gpa = fault_addr;
1377                } else {
1378                        /* Unknown error */
1379                        return ret;
1380                }
1381                goto forward_to_l1;
1382        } else {
1383                /* We found a pte -> check permissions */
1384                if (dsisr & DSISR_ISSTORE) {
1385                        /* Can we write? */
1386                        if (!gpte_p->may_write) {
1387                                flags |= DSISR_PROTFAULT;
1388                                goto forward_to_l1;
1389                        }
1390                } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
1391                        /* Can we execute? */
1392                        if (!gpte_p->may_execute) {
1393                                flags |= SRR1_ISI_N_G_OR_CIP;
1394                                goto forward_to_l1;
1395                        }
1396                } else {
1397                        /* Can we read? */
1398                        if (!gpte_p->may_read && !gpte_p->may_write) {
1399                                flags |= DSISR_PROTFAULT;
1400                                goto forward_to_l1;
1401                        }
1402                }
1403        }
1404
1405        return 0;
1406
1407forward_to_l1:
1408        vcpu->arch.fault_dsisr = flags;
1409        if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
1410                vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
1411                vcpu->arch.shregs.msr |= flags;
1412        }
1413        return RESUME_HOST;
1414}
1415
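     /*
      * Handle setting of the reference/change bits for a nested fault.
      * The L1 partition-scoped pte must already grant the bits being set
      * (checked against gpte.rc below); they are then set both in the L0
      * pte that maps the L1 address and in the shadow pte that maps the
      * L2 address.
      */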
1416static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
1417                                       struct kvm_nested_guest *gp,
1418                                       unsigned long n_gpa,
1419                                       struct kvmppc_pte gpte,
1420                                       unsigned long dsisr)
1421{
1422        struct kvm *kvm = vcpu->kvm;
1423        bool writing = !!(dsisr & DSISR_ISSTORE);
1424        u64 pgflags;
1425        long ret;
1426
1427        /* Are the rc bits set in the L1 partition scoped pte? */
1428        pgflags = _PAGE_ACCESSED;
1429        if (writing)
1430                pgflags |= _PAGE_DIRTY;
1431        if (pgflags & ~gpte.rc)
1432                return RESUME_HOST;
1433
1434        spin_lock(&kvm->mmu_lock);
1435        /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
1436        ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
1437                                      gpte.raddr, kvm->arch.lpid);
1438        if (!ret) {
1439                ret = -EINVAL;
1440                goto out_unlock;
1441        }
1442
 1443        /* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
1444        ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
1445                                      n_gpa, gp->l1_lpid);
1446        if (!ret)
1447                ret = -EINVAL;
1448        else
1449                ret = 0;
1450
1451out_unlock:
1452        spin_unlock(&kvm->mmu_lock);
1453        return ret;
1454}
1455
1456static inline int kvmppc_radix_level_to_shift(int level)
1457{
1458        switch (level) {
1459        case 2:
1460                return PUD_SHIFT;
1461        case 1:
1462                return PMD_SHIFT;
1463        default:
1464                return PAGE_SHIFT;
1465        }
1466}
1467
1468static inline int kvmppc_radix_shift_to_level(int shift)
1469{
1470        if (shift == PUD_SHIFT)
1471                return 2;
1472        if (shift == PMD_SHIFT)
1473                return 1;
1474        if (shift == PAGE_SHIFT)
1475                return 0;
1476        WARN_ON_ONCE(1);
1477        return 0;
1478}
1479
1480/* called with gp->tlb_lock held */
1481static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
1482                                          struct kvm_nested_guest *gp)
1483{
1484        struct kvm *kvm = vcpu->kvm;
1485        struct kvm_memory_slot *memslot;
1486        struct rmap_nested *n_rmap;
1487        struct kvmppc_pte gpte;
1488        pte_t pte, *pte_p;
1489        unsigned long mmu_seq;
1490        unsigned long dsisr = vcpu->arch.fault_dsisr;
1491        unsigned long ea = vcpu->arch.fault_dar;
1492        unsigned long *rmapp;
1493        unsigned long n_gpa, gpa, gfn, perm = 0UL;
1494        unsigned int shift, l1_shift, level;
1495        bool writing = !!(dsisr & DSISR_ISSTORE);
1496        bool kvm_ro = false;
1497        long int ret;
1498
1499        if (!gp->l1_gr_to_hr) {
1500                kvmhv_update_ptbl_cache(gp);
1501                if (!gp->l1_gr_to_hr)
1502                        return RESUME_HOST;
1503        }
1504
1505        /* Convert the nested guest real address into an L1 guest real address */
1506
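            /*
             * The top nibble of fault_gpa does not carry address bits and the
             * low 12 bits are not valid here, so both are masked off; the
             * byte offset within the page is taken from the faulting ea
             * instead, except for a fault on the process table, where no
             * meaningful offset exists.
             */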
1507        n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
1508        if (!(dsisr & DSISR_PRTABLE_FAULT))
1509                n_gpa |= ea & 0xFFF;
1510        ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
1511
1512        /*
1513         * If the hardware found a translation but we don't now have a usable
1514         * translation in the l1 partition-scoped tree, remove the shadow pte
1515         * and let the guest retry.
1516         */
1517        if (ret == RESUME_HOST &&
1518            (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
1519                      DSISR_BAD_COPYPASTE)))
1520                goto inval;
1521        if (ret)
1522                return ret;
1523
1524        /* Hardware failed to set the reference/change bits; do it ourselves */
1525        if (dsisr & DSISR_SET_RC) {
1526                ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
1527                if (ret == RESUME_HOST)
1528                        return ret;
1529                if (ret)
1530                        goto inval;
1531                dsisr &= ~DSISR_SET_RC;
1532                if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
1533                               DSISR_PROTFAULT)))
1534                        return RESUME_GUEST;
1535        }
1536
1537        /*
1538         * We took an HISI or HDSI while we were running a nested guest, which
1539         * means we have no partition scoped translation for that address, so
1540         * we need to insert a pte for the mapping into our shadow_pgtable.
1541         */
1542
1543        l1_shift = gpte.page_shift;
1544        if (l1_shift < PAGE_SHIFT) {
1545                /* We don't support l1 using a page size smaller than our own */
1546                pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
1547                        l1_shift, PAGE_SHIFT);
1548                return -EINVAL;
1549        }
1550        gpa = gpte.raddr;
1551        gfn = gpa >> PAGE_SHIFT;
1552
1553        /* 1. Get the corresponding host memslot */
1554
1555        memslot = gfn_to_memslot(kvm, gfn);
1556        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
1557                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
1558                        /* unusual error -> reflect to the guest as a DSI */
1559                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
1560                        return RESUME_GUEST;
1561                }
1562
1563                /* passthrough of emulated MMIO case */
1564                return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
1565        }
1566        if (memslot->flags & KVM_MEM_READONLY) {
1567                if (writing) {
1568                        /* Give the guest a DSI */
1569                        kvmppc_core_queue_data_storage(vcpu, ea,
1570                                        DSISR_ISSTORE | DSISR_PROTFAULT);
1571                        return RESUME_GUEST;
1572                }
1573                kvm_ro = true;
1574        }
1575
1576        /* 2. Find the host pte for this L1 guest real address */
1577
1578        /* Used to check for invalidations in progress */
1579        mmu_seq = kvm->mmu_notifier_seq;
1580        smp_rmb();
1581
1582        /* See if we can find a translation in our partition scoped tables for L1 */
1583        pte = __pte(0);
1584        spin_lock(&kvm->mmu_lock);
1585        pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
1586        if (!shift)
1587                shift = PAGE_SHIFT;
1588        if (pte_p)
1589                pte = *pte_p;
1590        spin_unlock(&kvm->mmu_lock);
1591
1592        if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
1593                /* No suitable pte found -> try to insert a mapping */
1594                ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
1595                                        writing, kvm_ro, &pte, &level);
1596                if (ret == -EAGAIN)
1597                        return RESUME_GUEST;
1598                else if (ret)
1599                        return ret;
1600                shift = kvmppc_radix_level_to_shift(level);
1601        }
1602        /* Align gfn to the start of the page */
1603        gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
1604
1605        /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
1606
1607        /* The permissions are the combination of the host and l1 guest ptes */
1608        perm |= gpte.may_read ? 0UL : _PAGE_READ;
1609        perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
1610        perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
1611        /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
1612        perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
1613        perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
1614        pte = __pte(pte_val(pte) & ~perm);
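            /*
             * For example, if the l1 guest pte is read-only then _PAGE_WRITE
             * accumulates in perm and is cleared from the shadow pte, so the
             * nested guest can never be granted more access than L1 granted
             * it; DIRTY survives only on a store to a page L1 has already
             * marked dirty.
             */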
1615
1616        /* What size pte can we insert? */
1617        if (shift > l1_shift) {
1618                u64 mask;
1619                unsigned int actual_shift = PAGE_SHIFT;
1620                if (PMD_SHIFT < l1_shift)
1621                        actual_shift = PMD_SHIFT;
1622                mask = (1UL << shift) - (1UL << actual_shift);
1623                pte = __pte(pte_val(pte) | (gpa & mask));
1624                shift = actual_shift;
1625        }
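            /*
             * Example of the downgrade above, assuming a 4K base page size:
             * if the host maps the page with a 2M pte (shift 21) but L1 maps
             * it with 64K (l1_shift 16), PMD_SHIFT is not below l1_shift, so
             * we fall back to 4K pages; the offset of gpa within the 2M host
             * page is folded into the pte so the smaller shadow pte still
             * points at the right piece of the large host page.
             */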
1626        level = kvmppc_radix_shift_to_level(shift);
1627        n_gpa &= ~((1UL << shift) - 1);
1628
1629        /* 4. Insert the pte into our shadow_pgtable */
1630
1631        n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
1632        if (!n_rmap)
1633                return RESUME_GUEST; /* Let the guest try again */
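            /*
             * The rmap entry packs the nested guest real address together
             * with the L1 lpid, so that from the memslot's rmap chain for
             * the host page the corresponding shadow pte can be found and
             * invalidated later.
             */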
1634        n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
1635                (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
1636        rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1637        ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
1638                                mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
1639        kfree(n_rmap);
1640        if (ret == -EAGAIN)
1641                ret = RESUME_GUEST;     /* Let the guest try again */
1642
1643        return ret;
1644
1645 inval:
1646        kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
1647        return RESUME_GUEST;
1648}
1649
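    /*
     * Entry point for nested page faults: gp->tlb_lock keeps the fault path
     * from racing with emulated tlbie invalidations of this nested guest's
     * shadow page table.
     */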
1650long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
1651{
1652        struct kvm_nested_guest *gp = vcpu->arch.nested;
1653        long int ret;
1654
1655        mutex_lock(&gp->tlb_lock);
1656        ret = __kvmhv_nested_page_fault(vcpu, gp);
1657        mutex_unlock(&gp->tlb_lock);
1658        return ret;
1659}
1660
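    /*
     * Return the next lpid after @lpid that currently has a nested guest,
     * or -1 if there is none; the lookup is done under kvm->mmu_lock.
     */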
1661int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
1662{
1663        int ret = -1;
1664
1665        spin_lock(&kvm->mmu_lock);
1666        while (++lpid <= kvm->arch.max_nested_lpid) {
1667                if (kvm->arch.nested_guests[lpid]) {
1668                        ret = lpid;
1669                        break;
1670                }
1671        }
1672        spin_unlock(&kvm->mmu_lock);
1673        return ret;
1674}
1675