linux/arch/powerpc/kvm/e500_tlb.c
/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"

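/*
 * Guest-visible TLB1 entries are shadowed at the top of the host TLB1,
 * growing downward: guest esel 0 lives in host entry tlb1_entry_num - 1,
 * esel 1 in tlb1_entry_num - 2, and so on.  The bottom tlbcam_index
 * entries stay reserved for the host's own CAM mappings (see
 * tlb1_max_shadow_size() below).
 */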
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe *tlbe;
        int i, tlbsel;

        printk("| %8s | %8s | %8s | %8s | %8s |\n",
                        "nr", "mas1", "mas2", "mas3", "mas7");

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                printk("Guest TLB%d:\n", tlbsel);
                for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
                        tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
                        if (tlbe->mas1 & MAS1_VALID)
                                printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
                                        tlbsel, i, tlbe->mas1, tlbe->mas2,
                                        tlbe->mas3, tlbe->mas7);
                }
        }

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                printk("Shadow TLB%d:\n", tlbsel);
                for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
                        tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
                        if (tlbe->mas1 & MAS1_VALID)
                                printk(" S[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
                                        tlbsel, i, tlbe->mas1, tlbe->mas2,
                                        tlbe->mas3, tlbe->mas7);
                }
        }
}

static inline unsigned int tlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->guest_tlb_nv[0]++;
        if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
                vcpu_e500->guest_tlb_nv[0] = 0;

        return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
        return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
        return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

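/*
 * Compute MAS3 permissions for a shadow entry from the guest's MAS3.
 * The guest runs in the host's user mode, so guest-supervisor
 * permissions must be granted through the user permission bits.  Each
 * U* bit sits one position above its S* counterpart, hence the shift:
 * e.g. a guest entry with only MAS3_SR|MAS3_SW set also gets
 * MAS3_UR|MAS3_UW in the shadow entry while the guest kernel runs.
 * Supervisor permissions are always set so the host kernel keeps access.
 */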
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }

        return mas3 | E500_TLB_SUPER_PERM_MASK;
}

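/*
 * On SMP hosts every shadow mapping is forced memory-coherent (MAS2_M),
 * since the vcpu may be migrated between physical CPUs.
 */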
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.  The caller must have
 * disabled interrupts and, for TLB1, set up MAS0 beforehand.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, stlbe->mas2);
        mtspr(SPRN_MAS3, stlbe->mas3);
        mtspr(SPRN_MAS7, stlbe->mas7);
        __asm__ __volatile__ ("tlbwe\n" : : );
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        local_irq_disable();
        if (tlbsel == 0) {
                __write_host_tlbe(stlbe);
        } else {
                unsigned int mas0;

                mas0 = mfspr(SPRN_MAS0);

                mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
                __write_host_tlbe(stlbe);

                mtspr(SPRN_MAS0, mas0);
        }
        local_irq_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int i;
        unsigned int mas0;

        /* Preload all valid shadow TLB1 entries to reduce guest TLB miss faults. */
        local_irq_disable();
        mas0 = mfspr(SPRN_MAS0);
        for (i = 0; i < tlb1_max_shadow_size(); i++) {
                struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

                if (get_tlb_v(stlbe)) {
                        mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
                                        | MAS0_ESEL(to_htlb1_esel(i)));
                        __write_host_tlbe(stlbe);
                }
        }
        mtspr(SPRN_MAS0, mas0);
        local_irq_enable();
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
        _tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
                struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return i;
        }

        return -1;
}

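/*
 * Drop the reference on the host page backing a shadow entry.  If the
 * shadow mapping was writable, the page is released dirty so the host
 * knows it may have been modified.
 */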
static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
        struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

        if (page) {
                vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

                if (get_tlb_v(stlbe)) {
                        if (tlbe_is_writable(stlbe))
                                kvm_release_page_dirty(page);
                        else
                                kvm_release_page_clean(page);
                }
        }
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
        stlbe->mas1 = 0;
        KVMTRACE_5D(STLB_INVAL, &vcpu_e500->vcpu, index_of(tlbsel, esel),
                        stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7,
                        handler);
}

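/*
 * Invalidate any shadow TLB1 entries that overlap [eaddr, eend] and
 * match the given TID.  Writing the now-invalid entry back with
 * write_host_tlbe() clears it from the host TLB as well.
 */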
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, gva_t eend, u32 tid)
{
        unsigned int pid = tid & 0xff;
        unsigned int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < tlb1_max_shadow_size(); i++) {
                struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
                unsigned int stid;

                if (!get_tlb_v(stlbe))
                        continue;

                if (eend < get_tlb_eaddr(stlbe))
                        continue;

                if (eaddr > get_tlb_end(stlbe))
                        continue;

                stid = get_tlb_tid(stlbe);
                if (stid && (stid != pid))
                        continue;

                kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
                write_host_tlbe(vcpu_e500, 1, i);
        }
}

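/*
 * Emulate what the hardware loads into the MAS registers on a TLB
 * miss: MAS4 supplies the defaults (TLBSELD, PIDSELD and TSIZED), and
 * MAS0/MAS1/MAS2/MAS6 are set up so the guest's miss handler can
 * tlbwe a new entry with minimal fixup.
 */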
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, pidsel, tsized;
        int tlbsel;

        /* Since we only have two TLBs, only the lower bit is used. */
        tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
        pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
        tsized = (vcpu_e500->mas4 >> 8) & 0xf;

        vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
        vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(vcpu_e500->pid[pidsel])
                | MAS1_TSIZE(tsized);
        vcpu_e500->mas2 = (eaddr & MAS2_EPN)
                | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
        vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
        vcpu_e500->mas7 = 0;
}

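/*
 * Create a 4K shadow mapping for a guest translation: pin the backing
 * page with gfn_to_page(), release the page behind any previous shadow
 * entry in this slot, then build the shadow TLB entry from the host
 * physical address and the guest entry's attributes.
 */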
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
        struct page *new_page;
        struct tlbe *stlbe;
        hpa_t hpaddr;

        stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        /* Get reference to new page. */
        new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
                kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);

        /* Drop reference to old page. */
        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

        vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

        /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(BOOKE_PAGESZ_4K)
                | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN)
                | e500_shadow_mas2_attrib(gtlbe->mas2,
                                vcpu_e500->vcpu.arch.msr & MSR_PR);
        stlbe->mas3 = (hpaddr & MAS3_RPN)
                | e500_shadow_mas3_attrib(gtlbe->mas3,
                                vcpu_e500->vcpu.arch.msr & MSR_PR);
        stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

        KVMTRACE_5D(STLB_WRITE, &vcpu_e500->vcpu, index_of(tlbsel, esel),
                        stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7,
                        handler);
}

/* XXX For now this only maps the one-to-one case, using TLB0. */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *gtlbe;

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, tlbsel, esel);

        return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX Handles both one-to-one and one-to-many mappings; for now uses TLB1. */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
        unsigned int victim;

        victim = vcpu_e500->guest_tlb_nv[1]++;

        if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
                vcpu_e500->guest_tlb_nv[1] = 0;

        kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

        return victim;
}

/* Invalidate all guest kernel mappings when entering user mode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        if (usermode) {
                struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
                int i;

                /* XXX Replace loop with fancy data structures. */
                for (i = 0; i < tlb1_max_shadow_size(); i++)
                        kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

                _tlbil_all();
        }
}

static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1) {
                kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
                                get_tlb_end(gtlbe),
                                get_tlb_tid(gtlbe));
        } else {
                kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        gtlbe->mas1 = 0;

        return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        _tlbil_all();

        return EMULATE_DONE;
}

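/*
 * Emulate tlbivax.  The effective address encodes the operation:
 * EA bit 2 set means "invalidate all" (IA), and EA bit 3 selects the
 * TLB.  Guest IPROT entries are preserved by
 * kvmppc_e500_gtlbe_invalidate().
 */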
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;
        gva_t ea;

        ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];

        ia = (ea >> 2) & 0x1;

        /* Since we only have two TLBs, only the lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        _tlbil_all();

        return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct tlbe *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu_e500);
        esel = get_tlb_esel(vcpu_e500, tlbsel);

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
        vcpu_e500->mas0 &= ~MAS0_NV(~0);
        vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
        vcpu_e500->mas1 = gtlbe->mas1;
        vcpu_e500->mas2 = gtlbe->mas2;
        vcpu_e500->mas3 = gtlbe->mas3;
        vcpu_e500->mas7 = gtlbe->mas7;

        return EMULATE_DONE;
}

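/*
 * Emulate tlbsx.  Search both guest TLBs for an entry matching the
 * effective address in rb under the current SPID/SAS.  On a hit, load
 * MAS0-MAS7 from the matching entry; on a miss, load the MAS registers
 * with defaults from MAS4, mirroring what the hardware would do, so
 * the guest can follow up with a tlbwe.
 */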
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu_e500);
        unsigned int pid = get_cur_spid(vcpu_e500);
        int esel, tlbsel;
        struct tlbe *gtlbe = NULL;
        gva_t ea;

        ea = vcpu->arch.gpr[rb];

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
                        break;
                }
        }

        if (gtlbe) {
                vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
                vcpu_e500->mas1 = gtlbe->mas1;
                vcpu_e500->mas2 = gtlbe->mas2;
                vcpu_e500->mas3 = gtlbe->mas3;
                vcpu_e500->mas7 = gtlbe->mas7;
        } else {
                int victim;

                /* Since we only have two TLBs, only the lower bit is used. */
                tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
                victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

                vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
                vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
                        | (as ? MAS1_TS : 0)
                        | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
                vcpu_e500->mas2 &= MAS2_EPN;
                vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
                vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
                vcpu_e500->mas7 = 0;
        }

        return EMULATE_DONE;
}

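/*
 * Emulate tlbwe.  Commit the guest's MAS register state into the guest
 * TLB entry selected by MAS0, invalidating any shadow mappings the old
 * entry had, and eagerly create a host shadow mapping when the new
 * entry is host-safe (TLB0 entries, and the first 4KB of TLB1 entries).
 */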
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        u64 eaddr;
        u64 raddr;
        u32 tid;
        struct tlbe *gtlbe;
        int tlbsel, esel, stlbsel, sesel;

        tlbsel = get_tlb_tlbsel(vcpu_e500);
        esel = get_tlb_esel(vcpu_e500, tlbsel);

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (get_tlb_v(gtlbe) && tlbsel == 1) {
                eaddr = get_tlb_eaddr(gtlbe);
                tid = get_tlb_tid(gtlbe);
                kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
                                get_tlb_end(gtlbe), tid);
        }

        gtlbe->mas1 = vcpu_e500->mas1;
        gtlbe->mas2 = vcpu_e500->mas2;
        gtlbe->mas3 = vcpu_e500->mas3;
        gtlbe->mas7 = vcpu_e500->mas7;

        KVMTRACE_5D(GTLB_WRITE, vcpu, vcpu_e500->mas0,
                        gtlbe->mas1, gtlbe->mas2, gtlbe->mas3, gtlbe->mas7,
                        handler);

        /* Eagerly map the new entry into the shadow TLB when it is host-safe. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                switch (tlbsel) {
                case 0:
                        /* TLB0 */
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOKE_PAGESZ_4K);

                        stlbsel = 0;
                        sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

                        break;

                case 1:
                        /* TLB1 */
                        eaddr = get_tlb_eaddr(gtlbe);
                        raddr = get_tlb_raddr(gtlbe);

                        /* Create a 4KB mapping on the host.
                         * If the guest wanted a large page,
                         * only the first 4KB is mapped here and the rest
                         * are mapped on the fly. */
                        stlbsel = 1;
                        sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
                                        raddr >> PAGE_SHIFT, gtlbe);
                        break;

                default:
                        BUG();
                }
                write_host_tlbe(vcpu_e500, stlbsel, sesel);
        }

        return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                        gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe *gtlbe =
                &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
        u64 pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, i;

        for (tlbsel = 0; tlbsel < 2; tlbsel++)
                for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
                        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

        /* discard all guest mappings */
        _tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                        unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);
        int stlbsel, sesel;

        switch (tlbsel) {
        case 0:
                stlbsel = 0;
                sesel = esel;
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                struct tlbe *gtlbe
                        = &vcpu_e500->guest_tlb[tlbsel][esel];

                stlbsel = 1;
                sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
                break;
        }

        default:
                BUG();
                break;
        }
        write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct tlbe *tlbe;

        /* Insert large initial mapping for guest. */
        tlbe = &vcpu_e500->guest_tlb[1][0];
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_256M);
        tlbe->mas2 = 0;
        tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;

        /* 4K map for serial output. Used by kernel wrapper. */
        tlbe = &vcpu_e500->guest_tlb[1][1];
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_4K);
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;
}

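/*
 * Allocate the guest and shadow TLB arrays.  The TLB0 shadow mirrors
 * the guest TLB0 entry for entry; the TLB1 shadow (and its page-pin
 * array) is sized to the host's real TLB1, read from TLB1CFG, since
 * one guest TLB1 entry may fan out to many 4K shadow entries.
 */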
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

        vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
        vcpu_e500->guest_tlb[0] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->guest_tlb[0] == NULL)
                goto err_out;

        vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
        vcpu_e500->shadow_tlb[0] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->shadow_tlb[0] == NULL)
                goto err_out_guest0;

        vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
        vcpu_e500->guest_tlb[1] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
        if (vcpu_e500->guest_tlb[1] == NULL)
                goto err_out_shadow0;

        vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
        vcpu_e500->shadow_tlb[1] =
                kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
        if (vcpu_e500->shadow_tlb[1] == NULL)
                goto err_out_guest1;

        vcpu_e500->shadow_pages[0] =
                kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->shadow_pages[0] == NULL)
                goto err_out_shadow1;

        vcpu_e500->shadow_pages[1] =
                kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
        if (vcpu_e500->shadow_pages[1] == NULL)
                goto err_out_page0;

        return 0;

err_out_page0:
        kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
        kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
        kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
        kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
        kfree(vcpu_e500->guest_tlb[0]);
err_out:
        return -ENOMEM;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->shadow_pages[1]);
        kfree(vcpu_e500->shadow_pages[0]);
        kfree(vcpu_e500->shadow_tlb[1]);
        kfree(vcpu_e500->guest_tlb[1]);
        kfree(vcpu_e500->shadow_tlb[0]);
        kfree(vcpu_e500->guest_tlb[0]);
}