linux/arch/powerpc/include/asm/kvm_book3s_64.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
        return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
        return false;
}
#endif

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
        struct kvm *l1_host;            /* L1 VM that owns this nested guest */
        int l1_lpid;                    /* lpid L1 guest thinks this guest is */
        int shadow_lpid;                /* real lpid of this nested guest */
        pgd_t *shadow_pgtable;          /* our page table for this guest */
        u64 l1_gr_to_hr;                /* L1's addr of part'n-scoped table */
        u64 process_table;              /* process table entry for this guest */
        long refcnt;                    /* number of pointers to this struct */
        struct mutex tlb_lock;          /* serialize page faults and tlbies */
        struct kvm_nested_guest *next;
        cpumask_t need_tlb_flush;
        cpumask_t cpu_in_guest;
        short prev_cpu[NR_CPUS];
        u8 radix;                       /* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000   12-bit lpid field
 * 0x000FFFFFFFFFF000   40-bit guest 4k page frame number
 * 0x0000000000000001   1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK           0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT          (52)
#define RMAP_NESTED_GPA_MASK            0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY     0x0000000000000001UL
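
/*
 * Illustrative sketch only (there is no such helper in this file): a
 * nested rmap value for a given lpid and guest physical address could be
 * composed as
 *
 *      rmap = (((u64) lpid) << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK;
 *      rmap |= gpa & RMAP_NESTED_GPA_MASK;
 *
 * with RMAP_NESTED_IS_SINGLE_ENTRY OR'ed in when the value is stored
 * directly in the memslot rmap entry rather than in a list element.
 */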

/* Structure for a nested guest rmap entry */
struct rmap_nested {
        struct llist_node list;
        u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *                           safe against removal of the list entry or NULL list
 * @pos:        a (struct rmap_nested *) to use as a loop cursor
 * @node:       pointer to the first entry
 *              NOTE: this can be NULL
 * @rmapp:      an (unsigned long *) in which to return the rmap entries on each
 *              iteration
 *              NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero.  This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * unsigned long rmap;
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *      do_something(rmap);
 *      free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)                              \
        for ((pos) = llist_entry((node), typeof(*(pos)), list);                \
             (node) &&                                                         \
             (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
                          ((u64) (node)) : ((pos)->rmap))) &&                  \
             (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?      \
                         ((struct llist_node *) ((pos) = NULL)) :              \
                         (pos)->list.next)), true);                            \
             (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
                                          bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)     (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
                                         ___PPC_R(r))
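
/*
 * For example (illustrative only), invalidating a single process-scoped
 * TLB entry for a radix translation would encode RIC=0, PRS=1, R=1, i.e.
 * H_TLBIE_P1_ENC(0, 1, 1).
 */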

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER       18
#define PPC_MAX_HPT_ORDER       46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
        return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
        bool radix;

        if (vcpu->arch.nested)
                radix = vcpu->arch.nested->radix;
        else
                radix = kvm_is_radix(vcpu->kvm);

        return radix;
}

int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);

#define KVM_DEFAULT_HPT_ORDER   24      /* 16MB HPT by default */
#endif

/*
 * Invalid HDSISR value, used to indicate when hardware has not set the
 * register. Used to work around an erratum.
 */
#define HDSISR_CANARY   0x7fff

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK   0x40UL
#define HPTE_V_ABSENT   0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED        (1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED        HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
        unsigned long tmp, old;
        __be64 be_lockbit, be_bits;

        /*
         * We load/store in native endian, but the HTAB is in big endian.
         * If we byte-swap all the data we apply to the HPTE, we end up
         * operating on it correctly in spite of the endian difference.
         */
        be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
        be_bits = cpu_to_be64(bits);

        asm volatile("  ldarx   %0,0,%2\n"
                     "  and.    %1,%0,%3\n"
                     "  bne     2f\n"
                     "  or      %0,%0,%4\n"
                     "  stdcx.  %0,0,%2\n"
                     "  beq+    2f\n"
                     "  mr      %1,%3\n"
                     "2:        isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
                     : "cc", "memory");
        return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
        hpte_v &= ~HPTE_V_HVLOCK;
        hpte[0] = cpu_to_be64(hpte_v);
}
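
/*
 * Typical usage sketch (illustrative): a caller spins until the lock bit
 * is acquired, then releases it by rewriting dword 0 with HPTE_V_HVLOCK
 * clear, e.g.
 *
 *      while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *              cpu_relax();
 *      ... examine or update the HPTE ...
 *      unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 */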

/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
        unsigned int lphi;

        if (!(h & HPTE_V_LARGE))
                return 12;      /* 4kB */
        lphi = (l >> 16) & 0xf;
        switch ((l >> 12) & 0xf) {
        case 0:
                return !lphi ? 24 : 0;          /* 16MB */
        case 1:
                return 16;                      /* 64kB */
        case 3:
                return !lphi ? 34 : 0;          /* 16GB */
        case 7:
                return (16 << 8) + 12;          /* 64kB in 4kB */
        case 8:
                if (!lphi)
                        return (24 << 8) + 16;  /* 16MB in 64kB */
                if (lphi == 3)
                        return (24 << 8) + 12;  /* 16MB in 4kB */
                break;
        }
        return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
        return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
        int tmp = kvmppc_hpte_page_shifts(h, l);

        if (tmp >= 0x100)
                tmp >>= 8;
        return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
        int shift = kvmppc_hpte_actual_page_shift(v, r);

        if (shift)
                return 1ul << shift;
        return 0;
}
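
/*
 * For example (illustrative), an HPTE for a 16MB page in a 64kB base page
 * size segment has LP encoding 8 with lphi 0, so kvmppc_hpte_page_shifts()
 * returns (24 << 8) + 16; the base page shift is then 16, the actual page
 * shift is 24, and kvmppc_actual_pgsz() reports 16MB.
 */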

static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
        switch (base_shift) {
        case 12:
                switch (actual_shift) {
                case 12:
                        return 0;
                case 16:
                        return 7;
                case 24:
                        return 0x38;
                }
                break;
        case 16:
                switch (actual_shift) {
                case 16:
                        return 1;
                case 24:
                        return 8;
                }
                break;
        case 24:
                return 0;
        }
        return -1;
}

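/*
 * Build the RB operand for a tlbie instruction from the HPTE V and R
 * dwords and the index of the entry in the HPT; the AVA, B (segment
 * size), L/LP (page size) and AP fields are reconstructed below.
 */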
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
                                             unsigned long pte_index)
{
        int a_pgshift, b_pgshift;
        unsigned long rb = 0, va_low, sllp;

        b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
        if (a_pgshift >= 0x100) {
                b_pgshift &= 0xff;
                a_pgshift >>= 8;
        }

        /*
         * Ignore the top 14 bits of the VA.
         * v has its top two bits covering segment size, hence shift left
         * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
         * The AVA field in v also has the lower 23 bits of the VA ignored.
         * For a 4K base page size we need VA bits 14..65 (so we have to
         * collect an extra 11 bits); for other sizes we need bits 14..14+i.
         */
        /* This covers bits 14..54 of the VA */
        rb = (v & ~0x7fUL) << 16;               /* AVA field */

        /*
         * The AVA in v has the lower 23 bits cleared; we need to derive
         * them from the PTEG index.
         */
        va_low = pte_index >> 3;
        if (v & HPTE_V_SECONDARY)
                va_low = ~va_low;
        /*
         * Get the VPN bits from va_low by reversing the hash.
         * In v we have the VA with 23 bits dropped and then shifted left
         * by HPTE_V_AVPN_SHIFT (7) bits, so to line it up with the VSID
         * we shift v right by (SID_SHIFT - (23 - 7)).
         */
        if (!(v & HPTE_V_1TB_SEG))
                va_low ^= v >> (SID_SHIFT - 16);
        else
                va_low ^= v >> (SID_SHIFT_1T - 16);
        va_low &= 0x7ff;

        if (b_pgshift <= 12) {
                if (a_pgshift > 12) {
                        sllp = (a_pgshift == 16) ? 5 : 4;
                        rb |= sllp << 5;        /*  AP field */
                }
                rb |= (va_low & 0x7ff) << 12;   /* remaining 11 bits of AVA */
        } else {
                int aval_shift;
                /*
                 * Remaining bits of the AVA/LP fields; these also
                 * contain bits of the LP encoding.
                 */
                rb |= (va_low << b_pgshift) & 0x7ff000;
                /*
                 * Now clear the LP bits that are not needed for the
                 * actual page size.
                 */
                rb &= ~((1ul << a_pgshift) - 1);
                /*
                 * AVAL field: bits 58..(77 - base_page_shift) of the VA.
                 * We only have space for bits 58..64, and missing bits
                 * should be zero-filled. The +1 takes care of the L bit
                 * shift.
                 */
                aval_shift = 64 - (77 - b_pgshift) + 1;
                rb |= ((va_low << aval_shift) & 0xfe);

                rb |= 1;                /* L field */
                rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
        }
        rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;   /* B field */
        return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
        unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

        return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
        if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
                ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
        else
                ptel |= PP_RXRX;
        return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
        unsigned int wimg = hptel & HPTE_R_WIMG;

        /* Handle SAO */
        if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
            cpu_has_feature(CPU_FTR_ARCH_206))
                wimg = HPTE_R_M;

        if (!is_ci)
                return wimg == HPTE_R_M;
        /*
         * If the host mapping is cache-inhibited, make sure hptel also
         * has cache-inhibited set.
         */
        if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
                return false;
        return !!(wimg & HPTE_R_I);
}

/*
 * If the PTE is present, atomically set the referenced bit (and the dirty
 * bit, if we are writing and the PTE is writable) and return it;
 * otherwise return a zero PTE.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
        pte_t old_pte, new_pte = __pte(0);

        while (1) {
                /*
                 * Make sure we don't reload from ptep
                 */
                old_pte = READ_ONCE(*ptep);
                /*
                 * Wait until H_PAGE_BUSY is clear, then update the PTE
                 * atomically.
                 */
                if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
                        cpu_relax();
                        continue;
                }
                /* If the PTE is not present, return a zero PTE */
                if (unlikely(!pte_present(old_pte)))
                        return __pte(0);

                new_pte = pte_mkyoung(old_pte);
                if (writing && pte_write(old_pte))
                        new_pte = pte_mkdirty(new_pte);

                if (pte_xchg(ptep, old_pte, new_pte))
                        break;
        }
        return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return PP_RWRX <= pp && pp <= PP_RXRX;
        return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
        if (key)
                return pp == PP_RWRW;
        return pp <= PP_RWRW;
}

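/*
 * Extract the storage key from the HPTE second dword and look up the
 * 2-bit access-permission field for that key in the AMR (key 0 occupies
 * the two most-significant bits, hence the shift by 62 - 2 * skey).
 */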
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
        unsigned long skey;

        skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
                ((hpte_r & HPTE_R_KEY_LO) >> 9);
        return (amr >> (62 - 2 * skey)) & 3;
}

static inline void lock_rmap(unsigned long *rmap)
{
        do {
                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
                        cpu_relax();
        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
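
/*
 * Usage sketch (illustrative): the lock bit serializes updates to an rmap
 * chain, so a caller would typically do
 *
 *      lock_rmap(rmapp);
 *      ... walk or modify the rmap chain ...
 *      unlock_rmap(rmapp);
 */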

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                   unsigned long pagesize)
{
        unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

        if (pagesize <= PAGE_SIZE)
                return true;
        return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
        unsigned long senc = 0;

        if (psize > 0x1000) {
                senc = SLB_VSID_L;
                if (psize == 0x10000)
                        senc |= SLB_VSID_LP_01;
        }
        return senc;
}

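/*
 * Check whether an HPTE maps the VRMA (virtual real mode area): such
 * entries use a 1TB segment with the reserved VRMA_VSID.
 */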
static inline int is_vrma_hpte(unsigned long hpte_v)
{
        return (hpte_v & ~0xffffffUL) ==
                (HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
                                          struct revmap_entry *rev)
{
        if (atomic_read(&kvm->arch.hpte_mod_interest))
                rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw_check() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
        return rcu_dereference_raw_check(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
        /* HPTEs are 2**4 bytes long */
        return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
        /* 128 (2**7) bytes in each HPTEG */
        return (1UL << (hpt->order - 7)) - 1;
}
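
/*
 * For example (illustrative), with the default HPT order of 24 (a 16MB
 * HPT) there are 2^20 HPTEs and 2^17 HPTEGs, so kvmppc_hpt_mask()
 * returns 0x1ffff.
 */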

/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
                                  unsigned long npages)
{
        if (npages >= 8)
                memset((char *)map + i / 8, 0xff, npages / 8);
        else
                for (; npages; ++i, --npages)
                        __set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
                                         unsigned long npages)
{
        if (npages >= 8)
                memset((char *)map + i / 8, 0xff, npages / 8);
        else
                for (; npages; ++i, --npages)
                        set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
        msr &= ~MSR_HV;
        msr |= MSR_ME;
        return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
        vcpu->arch.regs.ccr  = vcpu->arch.cr_tm;
        vcpu->arch.regs.xer = vcpu->arch.xer_tm;
        vcpu->arch.regs.link  = vcpu->arch.lr_tm;
        vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
        vcpu->arch.amr = vcpu->arch.amr_tm;
        vcpu->arch.ppr = vcpu->arch.ppr_tm;
        vcpu->arch.dscr = vcpu->arch.dscr_tm;
        vcpu->arch.tar = vcpu->arch.tar_tm;
        memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
               sizeof(vcpu->arch.regs.gpr));
        vcpu->arch.fp  = vcpu->arch.fp_tm;
        vcpu->arch.vr  = vcpu->arch.vr_tm;
        vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
        vcpu->arch.cr_tm  = vcpu->arch.regs.ccr;
        vcpu->arch.xer_tm = vcpu->arch.regs.xer;
        vcpu->arch.lr_tm  = vcpu->arch.regs.link;
        vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
        vcpu->arch.amr_tm = vcpu->arch.amr;
        vcpu->arch.ppr_tm = vcpu->arch.ppr;
        vcpu->arch.dscr_tm = vcpu->arch.dscr;
        vcpu->arch.tar_tm = vcpu->arch.tar;
        memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
               sizeof(vcpu->arch.regs.gpr));
        vcpu->arch.fp_tm  = vcpu->arch.fp;
        vcpu->arch.vr_tm  = vcpu->arch.vr;
        vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
                             unsigned long gpa, unsigned int level,
                             unsigned long mmu_seq, unsigned int lpid,
                             unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
                                   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
                                           unsigned long clr, unsigned long set,
                                           unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
                                const struct kvm_memory_slot *memslot,
                                unsigned long gpa, unsigned long hpa,
                                unsigned long nbytes);

static inline pte_t *
find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
                                unsigned *hshift)
{
        pte_t *pte;

        pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
        return pte;
}

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
                                            unsigned *hshift)
{
        pte_t *pte;

        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held\n", __func__);
        pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);

        return pte;
}

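/*
 * As well as requiring the kvm->mmu_lock to be held, this checks whether
 * an MMU notifier invalidation has occurred since mmu_seq was sampled;
 * if so it returns NULL and the caller is expected to retry.
 */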
static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
                                       unsigned long ea, unsigned *hshift)
{
        pte_t *pte;

        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held\n", __func__);

        if (mmu_notifier_retry(kvm, mmu_seq))
                return NULL;

        pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

        return pte;
}

extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
                                        unsigned long ea, unsigned *hshift);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */