/* linux/arch/powerpc/kvm/44x_tlb.c */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

  20#include <linux/types.h>
  21#include <linux/string.h>
  22#include <linux/kvm.h>
  23#include <linux/kvm_host.h>
  24#include <linux/highmem.h>
  25#include <asm/mmu-44x.h>
  26#include <asm/kvm_ppc.h>
  27
  28#include "44x_tlb.h"
  29
  30#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
  31#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
  32
  33static unsigned int kvmppc_tlb_44x_pos;
  34
  35static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
  36{
  37        /* Mask off reserved bits. */
  38        attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
  39
  40        if (!usermode) {
  41                /* Guest is in supervisor mode, so we need to translate guest
  42                 * supervisor permissions into user permissions. */
  43                attrib &= ~PPC44x_TLB_USER_PERM_MASK;
  44                attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
  45        }
  46
  47        /* Make sure host can always access this memory. */
  48        attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
  49
  50        return attrib;
  51}
  52
  53/* Search the guest TLB for a matching entry. */
  54int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
  55                         unsigned int as)
  56{
  57        int i;
  58
  59        /* XXX Replace loop with fancy data structures. */
  60        for (i = 0; i < PPC44x_TLB_SIZE; i++) {
  61                struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
  62                unsigned int tid;
  63
  64                if (eaddr < get_tlb_eaddr(tlbe))
  65                        continue;
  66
  67                if (eaddr > get_tlb_end(tlbe))
  68                        continue;
  69
  70                tid = get_tlb_tid(tlbe);
  71                if (tid && (tid != pid))
  72                        continue;
  73
  74                if (!get_tlb_v(tlbe))
  75                        continue;
  76
  77                if (get_tlb_ts(tlbe) != as)
  78                        continue;
  79
  80                return i;
  81        }
  82
  83        return -1;
  84}
  85
  86struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
  87{
  88        unsigned int as = !!(vcpu->arch.msr & MSR_IS);
  89        unsigned int index;
  90
  91        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
  92        if (index == -1)
  93                return NULL;
  94        return &vcpu->arch.guest_tlb[index];
  95}
  96
  97struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
  98{
  99        unsigned int as = !!(vcpu->arch.msr & MSR_DS);
 100        unsigned int index;
 101
 102        index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 103        if (index == -1)
 104                return NULL;
 105        return &vcpu->arch.guest_tlb[index];
 106}
 107
 108static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
 109{
 110        return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
 111}
 112
 113static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 114                                      unsigned int index)
 115{
 116        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
 117        struct page *page = vcpu->arch.shadow_pages[index];
 118
 119        if (get_tlb_v(stlbe)) {
 120                if (kvmppc_44x_tlbe_is_writable(stlbe))
 121                        kvm_release_page_dirty(page);
 122                else
 123                        kvm_release_page_clean(page);
 124        }
 125}
 126
 127void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
 128{
 129        int i;
 130
 131        for (i = 0; i <= tlb_44x_hwater; i++)
 132                kvmppc_44x_shadow_release(vcpu, i);
 133}
 134
 135void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 136{
 137    vcpu->arch.shadow_tlb_mod[i] = 1;
 138}
 139
 140/* Caller must ensure that the specified guest TLB entry is safe to insert into
 141 * the shadow TLB. */
 142void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 143                    u32 flags)
 144{
 145        struct page *new_page;
 146        struct tlbe *stlbe;
 147        hpa_t hpaddr;
 148        unsigned int victim;
 149
 150        /* Future optimization: don't overwrite the TLB entry containing the
 151         * current PC (or stack?). */
 152        victim = kvmppc_tlb_44x_pos++;
 153        if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
 154                kvmppc_tlb_44x_pos = 0;
 155        stlbe = &vcpu->arch.shadow_tlb[victim];
 156
 157        /* Get reference to new page. */
 158        new_page = gfn_to_page(vcpu->kvm, gfn);
 159        if (is_error_page(new_page)) {
 160                printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
 161                kvm_release_page_clean(new_page);
 162                return;
 163        }
 164        hpaddr = page_to_phys(new_page);
 165
 166        /* Drop reference to old page. */
 167        kvmppc_44x_shadow_release(vcpu, victim);
 168
 169        vcpu->arch.shadow_pages[victim] = new_page;
 170
 171        /* XXX Make sure (va, size) doesn't overlap any other
 172         * entries. 440x6 user manual says the result would be
 173         * "undefined." */
 174
 175        /* XXX what about AS? */
 176
 177        stlbe->tid = !(asid & 0xff);
 178
 179        /* Force TS=1 for all guest mappings. */
 180        /* For now we hardcode 4KB mappings, but it will be important to
 181         * use host large pages in the future. */
 182        stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
 183                       | PPC44x_TLB_4K;
 184        stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
 185        stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
 186                                                    vcpu->arch.msr & MSR_PR);
 187        kvmppc_tlbe_set_modified(vcpu, victim);
 188
 189        KVMTRACE_5D(STLB_WRITE, vcpu, victim,
 190                        stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
 191                        handler);
 192}
 193
 194void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 195                           gva_t eend, u32 asid)
 196{
 197        unsigned int pid = !(asid & 0xff);
 198        int i;
 199
 200        /* XXX Replace loop with fancy data structures. */
 201        for (i = 0; i <= tlb_44x_hwater; i++) {
 202                struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
 203                unsigned int tid;
 204
 205                if (!get_tlb_v(stlbe))
 206                        continue;
 207
 208                if (eend < get_tlb_eaddr(stlbe))
 209                        continue;
 210
 211                if (eaddr > get_tlb_end(stlbe))
 212                        continue;
 213
 214                tid = get_tlb_tid(stlbe);
 215                if (tid && (tid != pid))
 216                        continue;
 217
 218                kvmppc_44x_shadow_release(vcpu, i);
 219                stlbe->word0 = 0;
 220                kvmppc_tlbe_set_modified(vcpu, i);
 221                KVMTRACE_5D(STLB_INVAL, vcpu, i,
 222                                stlbe->tid, stlbe->word0, stlbe->word1,
 223                                stlbe->word2, handler);
 224        }
 225}
 226
 227/* Invalidate all mappings on the privilege switch after PID has been changed.
 228 * The guest always runs with PID=1, so we must clear the entire TLB when
 229 * switching address spaces. */
 230void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 231{
 232        int i;
 233
 234        if (vcpu->arch.swap_pid) {
 235                /* XXX Replace loop with fancy data structures. */
 236                for (i = 0; i <= tlb_44x_hwater; i++) {
 237                        struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
 238
 239                        /* Future optimization: clear only userspace mappings. */
 240                        kvmppc_44x_shadow_release(vcpu, i);
 241                        stlbe->word0 = 0;
 242                        kvmppc_tlbe_set_modified(vcpu, i);
 243                        KVMTRACE_5D(STLB_INVAL, vcpu, i,
 244                                    stlbe->tid, stlbe->word0, stlbe->word1,
 245                                    stlbe->word2, handler);
 246                }
 247                vcpu->arch.swap_pid = 0;
 248        }
 249
 250        vcpu->arch.shadow_pid = !usermode;
 251}
 252