linux/arch/x86/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

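/*
 * Illustrative sketch (not part of this header): the includer defines
 * PTTYPE before each inclusion, roughly as mmu.c does:
 *
 *      #define PTTYPE 64
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 *      #define PTTYPE 32
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 * Each pass generates one variant of every FNAME() function below
 * (paging64_* or paging32_*).
 */
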
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define shadow_walker shadow_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define shadow_walker shadow_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
#else
        #error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;                              /* level at which the walk terminated */
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];    /* gfn of the guest page table at each level */
        pt_element_t ptes[PT_MAX_FULL_LEVELS];  /* guest pte read at each level */
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];      /* guest physical address of each pte */
        unsigned pt_access;                     /* access bits accumulated from parent tables */
        unsigned pte_access;                    /* effective access bits of the final pte */
        gfn_t gfn;                              /* translated guest frame number */
        u32 error_code;                         /* fault error code to inject on failure */
};

struct shadow_walker {
        struct kvm_shadow_walk walker;          /* generic shadow walk (entry callback) */
        struct guest_walker *guest_walker;      /* result of the guest page table walk */
        int user_fault;
        int write_fault;
        int largepage;
        int *ptwrite;                           /* set when the fault wrote a shadowed guest pte */
        pfn_t pfn;                              /* host pfn backing the faulting gfn */
        u64 *sptep;                             /* shadow pte installed by the walk, or NULL */
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
        return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
        return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

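/*
 * Atomically flip accessed/dirty bits in a guest pte.  The cmpxchg can lose
 * to a concurrent update of the gpte (by another vcpu or by the guest
 * itself); in that case this returns true and the caller restarts the walk.
 */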
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
                         gfn_t table_gfn, unsigned index,
                         pt_element_t orig_pte, pt_element_t new_pte)
{
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        page = gfn_to_page(kvm, table_gfn);

        table = kmap_atomic(page, KM_USER0);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table, KM_USER0);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
}

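/*
 * Derive ACC_* permission bits from a guest pte.  This relies on
 * ACC_WRITE_MASK and ACC_USER_MASK being defined as PT_WRITABLE_MASK and
 * PT_USER_MASK, so no shifting is needed; on 64-bit ptes, a set NX bit
 * (shifted down by PT64_NX_SHIFT) clears ACC_EXEC_MASK.
 */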
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
        unsigned access;

        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
        if (is_nx(vcpu))
                access &= ~(gpte >> PT64_NX_SHIFT);
#endif
        return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
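/*
 * The walk starts at cr3 (or the relevant pdptr in PAE mode) and descends
 * one level per iteration, narrowing pt_access with each table's permission
 * bits.  Accessed and dirty bits are set in the guest ptes via
 * FNAME(cmpxchg_gpte); if a gpte changes under us, the whole walk restarts.
 * On failure, walker->error_code holds the #PF error code to inject.
 */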
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr,
                            int write_fault, int user_fault, int fetch_fault)
{
        pt_element_t pte;
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;

        pgprintk("%s: addr %lx\n", __func__, addr);
walk:
        walker->level = vcpu->arch.mmu.root_level;
        pte = vcpu->arch.cr3;
#if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
                pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
                if (!is_present_pte(pte))
                        goto not_present;
                --walker->level;
        }
#endif
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

        pt_access = ACC_ALL;

        for (;;) {
                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                pte_gpa = gfn_to_gpa(table_gfn);
                pte_gpa += index * sizeof(pt_element_t);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
                pgprintk("%s: table_gfn[%d] %lx\n", __func__,
                         walker->level - 1, table_gfn);

                kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

                if (!is_present_pte(pte))
                        goto not_present;

                if (write_fault && !is_writeble_pte(pte))
                        if (user_fault || is_write_protection(vcpu))
                                goto access_error;

                if (user_fault && !(pte & PT_USER_MASK))
                        goto access_error;

#if PTTYPE == 64
                if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
                        goto access_error;
#endif

                if (!(pte & PT_ACCESSED_MASK)) {
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
                            index, pte, pte|PT_ACCESSED_MASK))
                                goto walk;
                        pte |= PT_ACCESSED_MASK;
                }

                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;

                if (walker->level == PT_PAGE_TABLE_LEVEL) {
                        walker->gfn = gpte_to_gfn(pte);
                        break;
                }

                if (walker->level == PT_DIRECTORY_LEVEL
                    && (pte & PT_PAGE_SIZE_MASK)
                    && (PTTYPE == 64 || is_pse(vcpu))) {
                        walker->gfn = gpte_to_gfn_pde(pte);
                        walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
                        if (PTTYPE == 32 && is_cpuid_PSE36())
                                walker->gfn += pse36_gfn_delta(pte);
                        break;
                }

                pt_access = pte_access;
                --walker->level;
        }

        if (write_fault && !is_dirty_pte(pte)) {
                bool ret;

                mark_page_dirty(vcpu->kvm, table_gfn);
                ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
                            pte|PT_DIRTY_MASK);
                if (ret)
                        goto walk;
                pte |= PT_DIRTY_MASK;
                kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
                walker->ptes[walker->level - 1] = pte;
        }

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pte_access, pt_access);
        return 1;

not_present:
        walker->error_code = 0;
        goto err;

access_error:
        walker->error_code = PFERR_PRESENT_MASK;

err:
        if (write_fault)
                walker->error_code |= PFERR_WRITE_MASK;
        if (user_fault)
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault)
                walker->error_code |= PFERR_FETCH_MASK;
        return 0;
}

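/*
 * Propagate a guest pte write into the corresponding shadow pte, reusing
 * the pfn that the pte-write path cached in vcpu->arch.update_pte.
 */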
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
                              u64 *spte, const void *pte)
{
        pt_element_t gpte;
        unsigned pte_access;
        pfn_t pfn;
        int largepage = vcpu->arch.update_pte.largepage;

        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
                if (!is_present_pte(gpte))
                        set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
                return;
        }
        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
        if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
                return;
        pfn = vcpu->arch.update_pte.pfn;
        if (is_error_pfn(pfn))
                return;
        if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
                return;
        kvm_get_pfn(pfn);
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
                     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
                     pfn, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
                                    struct kvm_vcpu *vcpu, u64 addr,
                                    u64 *sptep, int level)
{
        struct shadow_walker *sw =
                container_of(_sw, struct shadow_walker, walker);
        struct guest_walker *gw = sw->guest_walker;
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *shadow_page;
        u64 spte;
        int metaphysical;
        gfn_t table_gfn;
        int r;
        pt_element_t curr_pte;

        if (level == PT_PAGE_TABLE_LEVEL
            || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
                mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
                             sw->user_fault, sw->write_fault,
                             gw->ptes[gw->level-1] & PT_DIRTY_MASK,
                             sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
                             false);
                sw->sptep = sptep;
                return 1;
        }

        if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
                return 0;

        if (is_large_pte(*sptep)) {
                set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
                kvm_flush_remote_tlbs(vcpu->kvm);
                rmap_remove(vcpu->kvm, sptep);
        }

        if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
                metaphysical = 1;
                if (!is_dirty_pte(gw->ptes[level - 1]))
                        access &= ~ACC_WRITE_MASK;
                table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
        } else {
                metaphysical = 0;
                table_gfn = gw->table_gfn[level - 2];
        }
        shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
                                       metaphysical, access, sptep);
        if (!metaphysical) {
                r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
                                          &curr_pte, sizeof(curr_pte));
                if (r || curr_pte != gw->ptes[level - 2]) {
                        kvm_mmu_put_page(shadow_page, sptep);
                        kvm_release_pfn_clean(sw->pfn);
                        sw->sptep = NULL;
                        return 1;
                }
        }

        spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
                | PT_WRITABLE_MASK | PT_USER_MASK;
        *sptep = spte;
        return 0;
}

static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *guest_walker,
                         int user_fault, int write_fault, int largepage,
                         int *ptwrite, pfn_t pfn)
{
        struct shadow_walker walker = {
                .walker = { .entry = FNAME(shadow_walk_entry), },
                .guest_walker = guest_walker,
                .user_fault = user_fault,
                .write_fault = write_fault,
                .largepage = largepage,
                .ptwrite = ptwrite,
                .pfn = pfn,
        };

        if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
                return NULL;

        walk_shadow(&walker.walker, vcpu, addr);

        return walker.sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
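/*
 * To avoid racing with mmu notifier invalidations, mmu_notifier_seq is
 * sampled before gfn_to_pfn() and rechecked with mmu_notifier_retry() once
 * mmu_lock is held; on a mismatch nothing is installed and the guest simply
 * refaults.
 */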
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                               u32 error_code)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        int fetch_fault = error_code & PFERR_FETCH_MASK;
        struct guest_walker walker;
        u64 *shadow_pte;
        int write_pt = 0;
        int r;
        pfn_t pfn;
        int largepage = 0;
        unsigned long mmu_seq;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the shadow pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
                return 0;
        }

        if (walker.level == PT_DIRECTORY_LEVEL) {
                gfn_t large_gfn;
                large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
                if (is_largepage_backed(vcpu, large_gfn)) {
                        walker.gfn = large_gfn;
                        largepage = 1;
                }
        }
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
        pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

        /* mmio */
        if (is_error_pfn(pfn)) {
                pgprintk("gfn %lx is mmio\n", walker.gfn);
                kvm_release_pfn_clean(pfn);
                return 1;
        }

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;
        kvm_mmu_free_some_pages(vcpu);
        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                                  largepage, &write_pt, pfn);

        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 shadow_pte, *shadow_pte, write_pt);

        if (!write_pt)
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
        spin_unlock(&vcpu->kvm->mmu_lock);

        return write_pt;

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
                                      struct kvm_vcpu *vcpu, u64 addr,
                                      u64 *sptep, int level)
{

        if (level == PT_PAGE_TABLE_LEVEL) {
                if (is_shadow_present_pte(*sptep))
                        rmap_remove(vcpu->kvm, sptep);
                set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
                return 1;
        }
        if (!is_shadow_present_pte(*sptep))
                return 1;
        return 0;
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct shadow_walker walker = {
                .walker = { .entry = FNAME(shadow_invlpg_entry), },
        };

        walk_shadow(&walker.walker, vcpu, gva);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        }

        return gpa;
}

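/*
 * Pre-fill a shadow page's sptes from the guest page table it shadows.
 * Entries whose guest pte is not present are marked notrap (any fault there
 * is the guest's own to handle); the rest, and anything we failed to read,
 * stay trapping so the mapping can be built on first access.
 */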
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp)
{
        int i, j, offset, r;
        pt_element_t pt[256 / sizeof(pt_element_t)];
        gpa_t pte_gpa;

        if (sp->role.metaphysical
            || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
                nonpaging_prefetch_page(vcpu, sp);
                return;
        }

        pte_gpa = gfn_to_gpa(sp->gfn);
        if (PTTYPE == 32) {
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
                pte_gpa += offset * sizeof(pt_element_t);
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
                pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
                for (j = 0; j < ARRAY_SIZE(pt); ++j)
                        if (r || is_present_pte(pt[j]))
                                sp->spt[i+j] = shadow_trap_nonpresent_pte;
                        else
                                sp->spt[i+j] = shadow_notrap_nonpresent_pte;
        }
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
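/*
 * Re-validates each present spte against the current guest pte: stale or
 * unaccessed entries are dropped, the rest have their permissions
 * recomputed.  Returns nonzero if no present sptes remain (or the guest
 * ptes could not be read).
 */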
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        int i, offset, nr_present;

        offset = nr_present = 0;

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
                gfn_t gfn = sp->gfns[i];

                if (!is_shadow_present_pte(sp->spt[i]))
                        continue;

                pte_gpa = gfn_to_gpa(sp->gfn);
                pte_gpa += (i+offset) * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                        return -EINVAL;

                if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
                    !(gpte & PT_ACCESSED_MASK)) {
                        u64 nonpresent;

                        rmap_remove(vcpu->kvm, &sp->spt[i]);
                        if (is_present_pte(gpte))
                                nonpresent = shadow_trap_nonpresent_pte;
                        else
                                nonpresent = shadow_notrap_nonpresent_pte;
                        set_shadow_pte(&sp->spt[i], nonpresent);
                        continue;
                }

                nr_present++;
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_pte(gpte), 0, gfn,
                         spte_to_pfn(sp->spt[i]), true, false);
        }

        return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef shadow_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG