linux/arch/powerpc/kvm/book3s_64_mmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

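/*
 * Walk the guest SLB as cached in vcpu->arch.slb[] and return the valid
 * entry whose ESID matches the given effective address, handling both
 * 256MB and 1TB segments.  Returns NULL if no entry covers the address.
 */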
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
                                struct kvm_vcpu *vcpu,
                                gva_t eaddr)
{
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 cmp_esid = esid;

                if (!vcpu->arch.slb[i].valid)
                        continue;

                if (vcpu->arch.slb[i].tb)
                        cmp_esid = esid_1t;

                if (vcpu->arch.slb[i].esid == cmp_esid)
                        return &vcpu->arch.slb[i];
        }

        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (vcpu->arch.slb[i].vsid)
                        dprintk("  %d: %c%c%c %llx %llx\n", i,
                                vcpu->arch.slb[i].valid ? 'v' : ' ',
                                vcpu->arch.slb[i].large ? 'l' : ' ',
                                vcpu->arch.slb[i].tb    ? 't' : ' ',
                                vcpu->arch.slb[i].esid,
                                vcpu->arch.slb[i].vsid);
        }

        return NULL;
}

static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
        return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
        return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

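/*
 * Compute the virtual page number for an effective address by combining
 * the offset within the segment with the VSID from the SLB entry.
 */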
static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
        eaddr &= kvmppc_slb_offset_mask(slb);

        return (eaddr >> VPN_SHIFT) |
                ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}

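/*
 * Translate an effective address to a virtual page number via the
 * matching SLB entry, or return 0 if no SLB entry covers the address.
 */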
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         bool data)
{
        struct kvmppc_slb *slb;

        slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slb)
                return 0;

        return kvmppc_slb_calc_vpn(slb, eaddr);
}

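/*
 * Return the page shift for an MMU_PAGE_* value; anything other than
 * 64k or 16M is treated as a 4k page.
 */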
static int mmu_pagesize(int mmu_pg)
{
        switch (mmu_pg) {
        case MMU_PAGE_64K:
                return 16;
        case MMU_PAGE_16M:
                return 24;
        }
        return 12;
}

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
        return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

        return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

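/*
 * Compute the host virtual address of the primary (or, if @second is
 * set, secondary) PTEG that may hold the HPTE for @eaddr, using the
 * guest's SDR1 and the hash derived from the SLB entry.  For PAPR
 * guests SDR1 already holds an HVA; otherwise the GPA is converted
 * with gfn_to_hva().
 */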
static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
                                struct kvmppc_slb *slbe, gva_t eaddr,
                                bool second)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u64 hash, pteg, htabsize;
        u32 ssize;
        hva_t r;
        u64 vpn;

        htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

        vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
        ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
        hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
        if (second)
                hash = ~hash;
        hash &= ((1ULL << 39ULL) - 1ULL);
        hash &= htabsize;
        hash <<= 7ULL;

        pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg |= hash;

        dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
                kvmppc_mmu_book3s_64_get_page(slbe, eaddr),
                vcpu_book3s->sdr1, pteg, slbe->vsid);

        /*
         * When running a PAPR guest, SDR1 contains an HVA instead
         * of a GPA.
         */
        if (vcpu->arch.papr_enabled)
                r = pteg;
        else
                r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

        if (kvm_is_error_hva(r))
                return r;
        return r | (pteg & ~PAGE_MASK);
}

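/*
 * Build the abbreviated virtual page number for @eaddr in the position
 * it occupies in the first doubleword of an HPTE; the caller masks the
 * result with HPTE_V_AVPN.
 */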
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
        u64 avpn;

        avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
        avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

        if (p < 16)
                avpn >>= ((80 - p) - 56) - 8;   /* 16 - p */
        else
                avpn <<= p - 16;

        return avpn;
}

/*
 * Return the page size encoded in the second word of an HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
        switch (slbe->base_page_size) {
        case MMU_PAGE_64K:
                if ((r & 0xf000) == 0x1000)
                        return MMU_PAGE_64K;
                break;
        case MMU_PAGE_16M:
                if ((r & 0xff000) == 0)
                        return MMU_PAGE_16M;
                break;
        }
        return -1;
}

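/*
 * Translate a guest effective address by walking the guest hashed page
 * table: find the SLB entry, search the primary (and if necessary the
 * secondary) PTEG, and fill in @gpte with the real address and the
 * permissions derived from the PP bits and protection key.  The R and C
 * bits of the matched HPTE are updated as a side effect.  Returns 0 on
 * success, -EINVAL on a segment fault, -ENOENT if no HPTE maps the
 * address and -EPERM if the access is not permitted.
 */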
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      struct kvmppc_pte *gpte, bool data,
                                      bool iswrite)
{
        struct kvmppc_slb *slbe;
        hva_t ptegp;
        u64 pteg[16];
        u64 avpn = 0;
        u64 v, r;
        u64 v_val, v_mask;
        u64 eaddr_mask;
        int i;
        u8 pp, key = 0;
        bool found = false;
        bool second = false;
        int pgsize;
        ulong mp_ea = vcpu->arch.magic_page_ea;

        /* Magic page override */
        if (unlikely(mp_ea) &&
            unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                gpte->eaddr = eaddr;
                gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
                gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
                gpte->raddr &= KVM_PAM;
                gpte->may_execute = true;
                gpte->may_read = true;
                gpte->may_write = true;
                gpte->page_size = MMU_PAGE_4K;
                gpte->wimg = HPTE_R_M;

                return 0;
        }

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slbe)
                goto no_seg_found;

        avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
        v_val = avpn & HPTE_V_AVPN;

        if (slbe->tb)
                v_val |= SLB_VSID_B_1T;
        if (slbe->large)
                v_val |= HPTE_V_LARGE;
        v_val |= HPTE_V_VALID;

        v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
                HPTE_V_SECONDARY;

        pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
        ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
        if (kvm_is_error_hva(ptegp))
                goto no_page_found;

        if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
                printk_ratelimited(KERN_ERR
                        "KVM: Can't copy data from 0x%lx!\n", ptegp);
                goto no_page_found;
        }

        if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
                key = 4;
        else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
                key = 4;

        for (i = 0; i < 16; i += 2) {
                u64 pte0 = be64_to_cpu(pteg[i]);
                u64 pte1 = be64_to_cpu(pteg[i + 1]);

                /* Check all relevant fields of 1st dword */
                if ((pte0 & v_mask) == v_val) {
                        /* If large page bit is set, check pgsize encoding */
                        if (slbe->large &&
                            (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                                pgsize = decode_pagesize(slbe, pte1);
                                if (pgsize < 0)
                                        continue;
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                if (second)
                        goto no_page_found;
                v_val |= HPTE_V_SECONDARY;
                second = true;
                goto do_second;
        }

        v = be64_to_cpu(pteg[i]);
        r = be64_to_cpu(pteg[i + 1]);
        pp = (r & HPTE_R_PP) | key;
        if (r & HPTE_R_PP0)
                pp |= 8;

        gpte->eaddr = eaddr;
        gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

        eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
        gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
        gpte->page_size = pgsize;
        gpte->may_execute = ((r & HPTE_R_N) ? false : true);
        if (unlikely(vcpu->arch.disable_kernel_nx) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR))
                gpte->may_execute = true;
        gpte->may_read = false;
        gpte->may_write = false;
        gpte->wimg = r & HPTE_R_WIMG;

        switch (pp) {
        case 0:
        case 1:
        case 2:
        case 6:
                gpte->may_write = true;
                fallthrough;
        case 3:
        case 5:
        case 7:
        case 10:
                gpte->may_read = true;
                break;
        }

        dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
                "-> 0x%lx\n",
                eaddr, avpn, gpte->vpage, gpte->raddr);

        /*
         * Update PTE R and C bits, so the guest's swapper knows we used
         * the page.
         */
        if (gpte->may_read && !(r & HPTE_R_R)) {
                /*
                 * Set the accessed flag.
                 * We have to write this back with a single byte write
                 * because another vcpu may be accessing this on
                 * non-PAPR platforms such as mac99, and this is
                 * what real hardware does.
                 */
                char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
                r |= HPTE_R_R;
                put_user(r >> 8, addr + 6);
        }
        if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
                /* Set the dirty flag */
                /* Use a single byte write */
                char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
                r |= HPTE_R_C;
                put_user(r, addr + 7);
        }

        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

        if (!gpte->may_read || (iswrite && !gpte->may_write))
                return -EPERM;
        return 0;

no_page_found:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        return -ENOENT;

no_seg_found:
        dprintk("KVM MMU: Trigger segment fault\n");
        return -EINVAL;
}

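/*
 * Emulate slbmte: decode RB/RS into the guest SLB entry, work out the
 * base page size for large-page segments and map the new segment on
 * the host side.
 */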
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;

        if (slb_nr >= vcpu->arch.slb_nr)
                return;

        slbe = &vcpu->arch.slb[slb_nr];

        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
        slbe->esid  = slbe->tb ? esid_1t : esid;
        slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
        slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
        slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
        slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
        slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
        slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

        slbe->base_page_size = MMU_PAGE_4K;
        if (slbe->large) {
                if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
                        switch (rs & SLB_VSID_LP) {
                        case SLB_VSID_LP_00:
                                slbe->base_page_size = MMU_PAGE_16M;
                                break;
                        case SLB_VSID_LP_01:
                                slbe->base_page_size = MMU_PAGE_64K;
                                break;
                        }
                } else {
                        slbe->base_page_size = MMU_PAGE_16M;
                }
        }

        slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
        slbe->origv = rs;

        /* Map the new segment */
        kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

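/*
 * Emulate slbfee.: return the VSID doubleword of the SLB entry that
 * matches @eaddr, or -ENOENT if there is none.
 */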
static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
                                       ulong *ret_slb)
{
        struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);

        if (slbe) {
                *ret_slb = slbe->origv;
                return 0;
        }
        *ret_slb = 0;
        return -ENOENT;
}

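/*
 * Emulate slbmfee: read back the raw ESID doubleword of an SLB entry
 * (slbmfev below does the same for the VSID doubleword).
 */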
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->origv;
}

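/*
 * Emulate slbie: invalidate the SLB entry covering @ea and flush the
 * corresponding host segment mapping.
 */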
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
        struct kvmppc_slb *slbe;
        u64 seg_size;

        dprintk("KVM MMU: slbie(0x%llx)\n", ea);

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

        if (!slbe)
                return;

        dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

        slbe->valid = false;
        slbe->orige = 0;
        slbe->origv = 0;

        seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
        kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

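/*
 * Emulate slbia: invalidate all SLB entries except entry 0 and, if
 * instruction relocation is enabled, re-map the segment the guest is
 * currently executing from.
 */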
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
        int i;

        dprintk("KVM MMU: slbia()\n");

        for (i = 1; i < vcpu->arch.slb_nr; i++) {
                vcpu->arch.slb[i].valid = false;
                vcpu->arch.slb[i].orige = 0;
                vcpu->arch.slb[i].origv = 0;
        }

        if (kvmppc_get_msr(vcpu) & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
}

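/*
 * Emulate mtsrin for guests that use the segment register interface by
 * constructing the equivalent slbmte operands, as described in the
 * comment below.
 */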
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                        ulong value)
{
        u64 rb = 0, rs = 0;

        /*
         * According to Book III of the PowerPC Architecture, version
         * 2.01, mtsrin is implemented as:
         *
         * The SLB entry specified by (RB)32:35 is loaded from register
         * RS, as follows.
         *
         * SLBE Bit     Source                  SLB Field
         *
         * 0:31         0x0000_0000             ESID-0:31
         * 32:35        (RB)32:35               ESID-32:35
         * 36           0b1                     V
         * 37:61        0x00_0000|| 0b0         VSID-0:24
         * 62:88        (RS)37:63               VSID-25:51
         * 89:91        (RS)33:35               Ks Kp N
         * 92           (RS)36                  L ((RS)36 must be 0b0)
         * 93           0b0                     C
         */

        dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

        /* ESID = srnum */
        rb |= (srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= 1 << 27;
        /* Index = ESID */
        rb |= srnum;

        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
        rs |= ((value >> 28) & 0x7) << 9;

        kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

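/*
 * Emulate tlbie: work out which address bits are significant for the
 * page size being invalidated and flush the matching shadow PTEs on
 * every vcpu.
 */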
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
                                       bool large)
{
        u64 mask = 0xFFFFFFFFFULL;
        long i;
        struct kvm_vcpu *v;

        dprintk("KVM MMU: tlbie(0x%lx)\n", va);

        /*
         * The tlbie instruction changed behaviour starting with
         * POWER6.  POWER6 and later don't have the large page flag
         * in the instruction but in the RB value, along with bits
         * indicating page and segment sizes.
         */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
                /* POWER6 or later */
                if (va & 1) {           /* L bit */
                        if ((va & 0xf000) == 0x1000)
                                mask = 0xFFFFFFFF0ULL;  /* 64k page */
                        else
                                mask = 0xFFFFFF000ULL;  /* 16M page */
                }
        } else {
                /* older processors, e.g. PPC970 */
                if (large)
                        mask = 0xFFFFFF000ULL;
        }
        /* flush this VA on all vcpus */
        kvm_for_each_vcpu(i, v, vcpu->kvm)
                kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
        ulong mp_ea = vcpu->arch.magic_page_ea;

        return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
                (mp_ea >> SID_SHIFT) == esid;
}
#endif

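/*
 * Convert a guest ESID to the VSID used for host-side mappings, taking
 * the current MSR translation mode into account and flagging 1T
 * segments, 64k segments and problem state in the returned VSID.
 * Returns -EINVAL if translation is on but no SLB entry covers the
 * address (unless it is the magic page).
 */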
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
{
        ulong ea = esid << SID_SHIFT;
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
        ulong mp_ea = vcpu->arch.magic_page_ea;
        int pagesize = MMU_PAGE_64K;
        u64 msr = kvmppc_get_msr(vcpu);

        if (msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb) {
                        gvsid = slb->vsid;
                        pagesize = slb->base_page_size;
                        if (slb->tb) {
                                gvsid <<= SID_SHIFT_1T - SID_SHIFT;
                                gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
                                gvsid |= VSID_1T;
                        }
                }
        }

        switch (msr & (MSR_DR|MSR_IR)) {
        case 0:
                gvsid = VSID_REAL | esid;
                break;
        case MSR_IR:
                gvsid |= VSID_REAL_IR;
                break;
        case MSR_DR:
                gvsid |= VSID_REAL_DR;
                break;
        case MSR_DR|MSR_IR:
                if (!slb)
                        goto no_slb;

                break;
        default:
                BUG();
                break;
        }

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Mark this as a 64k segment if the host is using
         * 64k pages, the host MMU supports 64k pages and
         * the guest segment page size is >= 64k,
         * but not if this segment contains the magic page.
         */
        if (pagesize >= MMU_PAGE_64K &&
            mmu_psize_defs[MMU_PAGE_64K].shift &&
            !segment_contains_magic_page(vcpu, esid))
                gvsid |= VSID_64K;
#endif

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        *vsid = gvsid;
        return 0;

no_slb:
        /* Catch magic page case */
        if (unlikely(mp_ea) &&
            unlikely(esid == (mp_ea >> SID_SHIFT)) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                *vsid = VSID_REAL | esid;
                return 0;
        }

        return -EINVAL;
}

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
        return (to_book3s(vcpu)->hid[5] & 0x80);
}

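/*
 * Wire up the Book3S 64-bit software MMU callbacks for this vcpu and
 * mark it as using the SLB-based MMU.
 */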
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        mmu->mfsrin = NULL;
        mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
        mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
        mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
        mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
        mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
        mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
        mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}