linux/arch/powerpc/kvm/44x_tlb.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"

#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE        PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
        (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *tlbe;
        int i;

        printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
        printk("| %2s | %3s | %8s | %8s | %8s |\n",
                        "nr", "tid", "word0", "word1", "word2");

        for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
                tlbe = &vcpu_44x->guest_tlb[i];
                if (tlbe->word0 & PPC44x_TLB_VALID)
                        printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
                               i, tlbe->tid, tlbe->word0, tlbe->word1,
                               tlbe->word2);
        }
}
#endif

static inline void kvmppc_44x_tlbie(unsigned int index)
{
        /* 0 <= index < 64, so the V bit is clear and we can use the index as
         * word0. */
        asm volatile(
                "tlbwe %[index], %[index], 0\n"
        :
        : [index] "r"(index)
        );
}

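/* Read hardware TLB entry 'index' into *tlbe. Reading word 0 (PAGEID) also
 * deposits the entry's TID into MMUCR[STID], which is why we read MMUCR
 * right after the first tlbre and mask off the low byte. */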
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
        asm volatile(
                "tlbre %[word0], %[index], 0\n"
                "mfspr %[tid], %[sprn_mmucr]\n"
                "andi. %[tid], %[tid], 0xff\n"
                "tlbre %[word1], %[index], 1\n"
                "tlbre %[word2], %[index], 2\n"
                : [word0] "=r"(tlbe->word0),
                  [word1] "=r"(tlbe->word1),
                  [word2] "=r"(tlbe->word2),
                  [tid]   "=r"(tlbe->tid)
                : [index] "r"(index),
                  [sprn_mmucr] "i"(SPRN_MMUCR)
                : "cc"
        );
}

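/* Write *stlbe into hardware TLB entry 'index'. The entry's TID is taken
 * from MMUCR[STID], so stage it there before issuing the tlbwe sequence. */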
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
        unsigned long tmp;

        asm volatile(
                "mfspr %[tmp], %[sprn_mmucr]\n"
                "rlwimi %[tmp], %[tid], 0, 0xff\n"
                "mtspr %[sprn_mmucr], %[tmp]\n"
                "tlbwe %[word0], %[index], 0\n"
                "tlbwe %[word1], %[index], 1\n"
                "tlbwe %[word2], %[index], 2\n"
                : [tmp]   "=&r"(tmp)
                : [word0] "r"(stlbe->word0),
                  [word1] "r"(stlbe->word1),
                  [word2] "r"(stlbe->word2),
                  [tid]   "r"(stlbe->tid),
                  [index] "r"(index),
                  [sprn_mmucr] "i"(SPRN_MMUCR)
        );
}

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
        /* We only care about the guest's permission and user bits. */
        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode, so we need to translate guest
                 * supervisor permissions into user permissions. */
                attrib &= ~PPC44x_TLB_USER_PERM_MASK;
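                /* In TLB word 2 the user permission bits (UX/UW/UR) sit
                 * three bit positions above the supervisor bits (SX/SW/SR),
                 * so shifting left by 3 copies the supervisor permissions
                 * into the user permission field. */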
                attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
        }

        /* Make sure host can always access this memory. */
        attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

        /* WIMGE = 0b00100 */
        attrib |= PPC44x_TLB_M;

        return attrib;
}

/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i <= tlb_44x_hwater; i++) {
                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

                if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
                        kvmppc_44x_tlbwe(i, stlbe);
        }
}

static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
        vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i <= tlb_44x_hwater; i++) {
                struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

                if (vcpu_44x->shadow_tlb_mod[i])
                        kvmppc_44x_tlbre(i, stlbe);

                if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
                        kvmppc_44x_tlbie(i);
        }
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
                struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

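                /* TID=0 entries are global: they match any search PID. */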
                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as)
                        continue;

                return i;
        }

        return -1;
}

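/* Translate a guest effective address through guest TLB entry 'gtlb_index':
 * take the real page number from the entry and keep the offset bits of
 * eaddr that fall within the page. */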
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
        unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);

        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);

        return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

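/* TLB miss hooks: other Book E cores use these to record miss state (e.g.
 * the MAS registers on e500); the 440 keeps no such state, so they are
 * empty here. */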
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
        struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

        if (!ref->page)
                return;

        /* Discard from the TLB. */
        /* Note: we could actually invalidate a host mapping, if the host overwrote
         * this TLB entry since we inserted a guest mapping. */
        kvmppc_44x_tlbie(stlb_index);

        /* Now release the page. */
        if (ref->writeable)
                kvm_release_page_dirty(ref->page);
        else
                kvm_release_page_clean(ref->page);

        ref->page = NULL;

        /* XXX set tlb_44x_index to stlb_index? */

        KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i <= tlb_44x_hwater; i++)
                kvmppc_44x_shadow_release(vcpu_44x, i);
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
        struct kvmppc_44x_tlbe stlbe;
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
        struct kvmppc_44x_shadow_ref *ref;
        struct page *new_page;
        hpa_t hpaddr;
        gfn_t gfn;
        u32 asid = gtlbe->tid;
        u32 flags = gtlbe->word2;
        u32 max_bytes = get_tlb_bytes(gtlbe);
        unsigned int victim;

        /* Select TLB entry to clobber. Indirectly guard against races with the TLB
         * miss handler by disabling interrupts. */
        local_irq_disable();
        victim = ++tlb_44x_index;
        if (victim > tlb_44x_hwater)
                victim = 0;
        tlb_44x_index = victim;
        local_irq_enable();

        /* Get reference to new page. */
        gfn = gpaddr >> PAGE_SHIFT;
        new_page = gfn_to_page(vcpu->kvm, gfn);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
                       (unsigned long long)gfn);
                kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);

        /* Invalidate any previous shadow mappings. */
        kvmppc_44x_shadow_release(vcpu_44x, victim);

        /* XXX Make sure (va, size) doesn't overlap any other
         * entries. 440x6 user manual says the result would be
         * "undefined." */

        /* XXX what about AS? */

        /* Force TS=1 for all guest mappings. */
        stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

        if (max_bytes >= PAGE_SIZE) {
                /* Guest mapping is larger than or equal to host page size. We can use
                 * a "native" host mapping. */
                stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
        } else {
                /* Guest mapping is smaller than host page size. We must restrict the
                 * size of the mapping to be at most the smaller of the two, but for
                 * simplicity we fall back to a 4K mapping (this is probably what the
                 * guest is using anyway). */
                stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

                /* 'hpaddr' is a host page, which is larger than the mapping we're
                 * inserting here. To compensate, we must add the in-page offset to the
                 * sub-page. */
                hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
        }

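        /* TLB word 1 holds the physical address: bits 0xfffffc00 are the
         * RPN (the 1KB-aligned low 32 bits of the host physical address)
         * and the low nibble is the ERPN, the top 4 bits of the 440's
         * 36-bit physical address space. */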
        stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
        stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
                                                    vcpu->arch.msr & MSR_PR);
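        /* Guest TID=0 (global) entries become shadow TID=1, reachable only
         * with shadow PID 1 (guest supervisor mode); all other guest
         * entries become shadow TID=0 and are flushed by kvmppc_set_pid()
         * when the guest switches PID. */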
        stlbe.tid = !(asid & 0xff);

        /* Keep track of the reference so we can properly release it later. */
        ref = &vcpu_44x->shadow_refs[victim];
        ref->page = new_page;
        ref->gtlb_index = gtlb_index;
        ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
        ref->tid = stlbe.tid;

        /* Insert shadow mapping into hardware TLB. */
        kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
        kvmppc_44x_tlbwe(victim, &stlbe);
        KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
                    stlbe.word2, handler);
}

/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
                struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
                if (ref->gtlb_index == gtlb_index)
                        kvmppc_44x_shadow_release(vcpu_44x, i);
        }
}

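/* The guest's privilege level selects the shadow PID: supervisor mode runs
 * with host PID 1 (so it can match shadow TID=1 mappings), user mode with
 * host PID 0. */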
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        vcpu->arch.shadow_pid = !usermode;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        int i;

        if (unlikely(vcpu->arch.pid == new_pid))
                return;

        vcpu->arch.pid = new_pid;

        /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
         * can't access guest kernel mappings (TID=1). When we switch to a new
         * guest PID, which will also use host PID=0, we must discard the old guest
         * userspace mappings. */
        for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
                struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

                if (ref->tid == 0)
                        kvmppc_44x_shadow_release(vcpu_44x, i);
        }
}

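/* Decide whether a guest TLB entry can be inserted into the shadow TLB
 * right away: it must be valid, match the guest's current address space,
 * and map guest RAM. Anything else (e.g. I/O) is handled at fault time
 * instead. */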
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
        gpa_t gpa;

        if (!get_tlb_v(tlbe))
                return 0;

        /* Does it match current guest AS? */
        /* XXX what about IS != DS? */
        if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
                return 0;

        gpa = get_tlb_raddr(tlbe);
        if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
                /* Mapping is not for RAM. */
                return 0;

        return 1;
}

int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *tlbe;
        unsigned int gtlb_index;

        gtlb_index = vcpu->arch.gpr[ra];
        if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
                printk(KERN_ERR "%s: index %u\n", __func__, gtlb_index);
                kvmppc_dump_vcpu(vcpu);
                return EMULATE_FAIL;
        }

        tlbe = &vcpu_44x->guest_tlb[gtlb_index];

        /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
        if (tlbe->word0 & PPC44x_TLB_VALID)
                kvmppc_44x_invalidate(vcpu, gtlb_index);

        switch (ws) {
        case PPC44x_TLB_PAGEID:
                tlbe->tid = get_mmucr_stid(vcpu);
                tlbe->word0 = vcpu->arch.gpr[rs];
                break;

        case PPC44x_TLB_XLAT:
                tlbe->word1 = vcpu->arch.gpr[rs];
                break;

        case PPC44x_TLB_ATTRIB:
                tlbe->word2 = vcpu->arch.gpr[rs];
                break;

        default:
                return EMULATE_FAIL;
        }

        if (tlbe_is_host_safe(vcpu, tlbe)) {
                gva_t eaddr;
                gpa_t gpaddr;
                u32 bytes;

                eaddr = get_tlb_eaddr(tlbe);
                gpaddr = get_tlb_raddr(tlbe);

                /* Use the advertised page size to mask effective and real addrs. */
                bytes = get_tlb_bytes(tlbe);
                eaddr &= ~(bytes - 1);
                gpaddr &= ~(bytes - 1);

                kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
        }

        KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
                    tlbe->word1, tlbe->word2, handler);

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
        u32 ea;
        int gtlb_index;
        unsigned int as = get_mmucr_sts(vcpu);
        unsigned int pid = get_mmucr_stid(vcpu);

        ea = vcpu->arch.gpr[rb];
        if (ra)
                ea += vcpu->arch.gpr[ra];

        gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
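        /* For the record form (tlbsx.), set CR0[EQ] (mask 0x20000000) on a
         * hit and clear it on a miss. */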
        if (rc) {
                if (gtlb_index < 0)
                        vcpu->arch.cr &= ~0x20000000;
                else
                        vcpu->arch.cr |= 0x20000000;
        }
        vcpu->arch.gpr[rt] = gtlb_index;

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}