linux/arch/powerpc/kvm/book3s_hv.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
   4 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
   5 *
   6 * Authors:
   7 *    Paul Mackerras <paulus@au1.ibm.com>
   8 *    Alexander Graf <agraf@suse.de>
   9 *    Kevin Wolf <mail@kevin-wolf.de>
  10 *
  11 * Description: KVM functions specific to running on Book 3S
  12 * processors in hypervisor mode (specifically POWER7 and later).
  13 *
  14 * This file is derived from arch/powerpc/kvm/book3s.c,
  15 * by Alexander Graf <agraf@suse.de>.
  16 */
  17
  18#include <linux/kvm_host.h>
  19#include <linux/kernel.h>
  20#include <linux/err.h>
  21#include <linux/slab.h>
  22#include <linux/preempt.h>
  23#include <linux/sched/signal.h>
  24#include <linux/sched/stat.h>
  25#include <linux/delay.h>
  26#include <linux/export.h>
  27#include <linux/fs.h>
  28#include <linux/anon_inodes.h>
  29#include <linux/cpu.h>
  30#include <linux/cpumask.h>
  31#include <linux/spinlock.h>
  32#include <linux/page-flags.h>
  33#include <linux/srcu.h>
  34#include <linux/miscdevice.h>
  35#include <linux/debugfs.h>
  36#include <linux/gfp.h>
  37#include <linux/vmalloc.h>
  38#include <linux/highmem.h>
  39#include <linux/hugetlb.h>
  40#include <linux/kvm_irqfd.h>
  41#include <linux/irqbypass.h>
  42#include <linux/module.h>
  43#include <linux/compiler.h>
  44#include <linux/of.h>
  45
  46#include <asm/ftrace.h>
  47#include <asm/reg.h>
  48#include <asm/ppc-opcode.h>
  49#include <asm/asm-prototypes.h>
  50#include <asm/archrandom.h>
  51#include <asm/debug.h>
  52#include <asm/disassemble.h>
  53#include <asm/cputable.h>
  54#include <asm/cacheflush.h>
  55#include <linux/uaccess.h>
  56#include <asm/interrupt.h>
  57#include <asm/io.h>
  58#include <asm/kvm_ppc.h>
  59#include <asm/kvm_book3s.h>
  60#include <asm/mmu_context.h>
  61#include <asm/lppaca.h>
  62#include <asm/processor.h>
  63#include <asm/cputhreads.h>
  64#include <asm/page.h>
  65#include <asm/hvcall.h>
  66#include <asm/switch_to.h>
  67#include <asm/smp.h>
  68#include <asm/dbell.h>
  69#include <asm/hmi.h>
  70#include <asm/pnv-pci.h>
  71#include <asm/mmu.h>
  72#include <asm/opal.h>
  73#include <asm/xics.h>
  74#include <asm/xive.h>
  75#include <asm/hw_breakpoint.h>
  76#include <asm/kvm_book3s_uvmem.h>
  77#include <asm/ultravisor.h>
  78#include <asm/dtl.h>
  79#include <asm/plpar_wrappers.h>
  80
  81#include "book3s.h"
  82
  83#define CREATE_TRACE_POINTS
  84#include "trace_hv.h"
  85
  86/* #define EXIT_DEBUG */
  87/* #define EXIT_DEBUG_SIMPLE */
  88/* #define EXIT_DEBUG_INT */
  89
  90/* Used to indicate that a guest page fault needs to be handled */
  91#define RESUME_PAGE_FAULT       (RESUME_GUEST | RESUME_FLAG_ARCH1)
  92/* Used to indicate that a guest passthrough interrupt needs to be handled */
  93#define RESUME_PASSTHROUGH      (RESUME_GUEST | RESUME_FLAG_ARCH2)
  94
  95/* Used as a "null" value for timebase values */
  96#define TB_NIL  (~(u64)0)
  97
  98static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
  99
 100static int dynamic_mt_modes = 6;
 101module_param(dynamic_mt_modes, int, 0644);
 102MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
 103static int target_smt_mode;
 104module_param(target_smt_mode, int, 0644);
 105MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
 106
 107static bool one_vm_per_core;
 108module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
 109MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
 110
 111#ifdef CONFIG_KVM_XICS
 112static const struct kernel_param_ops module_param_ops = {
 113        .set = param_set_int,
 114        .get = param_get_int,
 115};
 116
 117module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
 118MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");
 119
 120module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
 121MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 122#endif
 123
 124/* If set, guests are allowed to create and control nested guests */
 125static bool nested = true;
 126module_param(nested, bool, S_IRUGO | S_IWUSR);
 127MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
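    /*
     * Illustrative note (not from the original source): the writable
     * (0644 / S_IRUGO | S_IWUSR) parameters above also show up under
     * /sys/module/kvm_hv/parameters/ once kvm-hv is loaded, so they can
     * be tweaked at module load or afterwards, e.g.:
     *
     *   modprobe kvm-hv dynamic_mt_modes=4
     *   echo 0 > /sys/module/kvm_hv/parameters/nested
     */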
 128
 129static inline bool nesting_enabled(struct kvm *kvm)
 130{
 131        return kvm->arch.nested_enable && kvm_is_radix(kvm);
 132}
 133
 134static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 135
 136/*
 137 * RWMR values for POWER8.  These control the rate at which PURR
 138 * and SPURR count and should be set according to the number of
 139 * online threads in the vcore being run.
 140 */
 141#define RWMR_RPA_P8_1THREAD     0x164520C62609AECAUL
 142#define RWMR_RPA_P8_2THREAD     0x7FFF2908450D8DA9UL
 143#define RWMR_RPA_P8_3THREAD     0x164520C62609AECAUL
 144#define RWMR_RPA_P8_4THREAD     0x199A421245058DA9UL
 145#define RWMR_RPA_P8_5THREAD     0x164520C62609AECAUL
 146#define RWMR_RPA_P8_6THREAD     0x164520C62609AECAUL
 147#define RWMR_RPA_P8_7THREAD     0x164520C62609AECAUL
 148#define RWMR_RPA_P8_8THREAD     0x164520C62609AECAUL
 149
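    /*
     * One entry per possible number of online threads (0-8); entry 0
     * simply duplicates the 1-thread value so the table can be indexed
     * directly by the thread count.
     */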
 150static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
 151        RWMR_RPA_P8_1THREAD,
 152        RWMR_RPA_P8_1THREAD,
 153        RWMR_RPA_P8_2THREAD,
 154        RWMR_RPA_P8_3THREAD,
 155        RWMR_RPA_P8_4THREAD,
 156        RWMR_RPA_P8_5THREAD,
 157        RWMR_RPA_P8_6THREAD,
 158        RWMR_RPA_P8_7THREAD,
 159        RWMR_RPA_P8_8THREAD,
 160};
 161
 162static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
 163                int *ip)
 164{
 165        int i = *ip;
 166        struct kvm_vcpu *vcpu;
 167
 168        while (++i < MAX_SMT_THREADS) {
 169                vcpu = READ_ONCE(vc->runnable_threads[i]);
 170                if (vcpu) {
 171                        *ip = i;
 172                        return vcpu;
 173                }
 174        }
 175        return NULL;
 176}
 177
 178/* Used to traverse the list of runnable threads for a given vcore */
 179#define for_each_runnable_thread(i, vcpu, vc) \
 180        for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
 181
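    /*
     * Try to interrupt the given physical CPU so that it drops back into
     * the hypervisor: a server doorbell (msgsnd) to any CPU on POWER9, a
     * same-core msgsnd on POWER8, or an XICS/OPAL IPI on bare metal.
     * Returns false if none of these are usable (e.g. when running as a
     * nested hypervisor), in which case the caller must fall back to an
     * ordinary host IPI.
     */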
 182static bool kvmppc_ipi_thread(int cpu)
 183{
 184        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 185
 186        /* If we're a nested hypervisor, fall back to ordinary IPIs for now */
 187        if (kvmhv_on_pseries())
 188                return false;
 189
 190        /* On POWER9 we can use msgsnd to IPI any cpu */
 191        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 192                msg |= get_hard_smp_processor_id(cpu);
 193                smp_mb();
 194                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
 195                return true;
 196        }
 197
 198        /* On POWER8 for IPIs to threads in the same core, use msgsnd */
 199        if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
 200                preempt_disable();
 201                if (cpu_first_thread_sibling(cpu) ==
 202                    cpu_first_thread_sibling(smp_processor_id())) {
 203                        msg |= cpu_thread_in_core(cpu);
 204                        smp_mb();
 205                        __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
 206                        preempt_enable();
 207                        return true;
 208                }
 209                preempt_enable();
 210        }
 211
 212#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 213        if (cpu >= 0 && cpu < nr_cpu_ids) {
 214                if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
 215                        xics_wake_cpu(cpu);
 216                        return true;
 217                }
 218                opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
 219                return true;
 220        }
 221#endif
 222
 223        return false;
 224}
 225
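    /*
     * Kick a vcpu: wake it if it is sleeping (e.g. ceded), and if it is
     * currently running in the guest, IPI the hardware thread it is on
     * so that it notices the wakeup promptly.
     */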
 226static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 227{
 228        int cpu;
 229        struct rcuwait *waitp;
 230
 231        waitp = kvm_arch_vcpu_get_wait(vcpu);
 232        if (rcuwait_wake_up(waitp))
 233                ++vcpu->stat.generic.halt_wakeup;
 234
 235        cpu = READ_ONCE(vcpu->arch.thread_cpu);
 236        if (cpu >= 0 && kvmppc_ipi_thread(cpu))
 237                return;
 238
 239        /* CPU points to the first thread of the core */
 240        cpu = vcpu->cpu;
 241        if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
 242                smp_send_reschedule(cpu);
 243}
 244
 245/*
 246 * We use the vcpu_load/put functions to measure stolen time.
 247 * Stolen time is counted as time when either the vcpu is able to
 248 * run as part of a virtual core, but the task running the vcore
 249 * is preempted or sleeping, or when the vcpu needs something done
 250 * in the kernel by the task running the vcpu, but that task is
 251 * preempted or sleeping.  Those two things have to be counted
 252 * separately, since one of the vcpu tasks will take on the job
 253 * of running the core, and the other vcpu tasks in the vcore will
 254 * sleep waiting for it to do that, but that sleep shouldn't count
 255 * as stolen time.
 256 *
 257 * Hence we accumulate stolen time when the vcpu can run as part of
 258 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 259 * needs its task to do other things in the kernel (for example,
 260 * service a page fault) in busy_stolen.  We don't accumulate
 261 * stolen time for a vcore when it is inactive, or for a vcpu
 262 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 263 * a misnomer; it means that the vcpu task is not executing in
 264 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 265 * the kernel.  We don't have any way of dividing up that time
 266 * between time that the vcpu is genuinely stopped, time that
 267 * the task is actively working on behalf of the vcpu, and time
 268 * that the task is preempted, so we don't count any of it as
 269 * stolen.
 270 *
 271 * Updates to busy_stolen are protected by arch.tbacct_lock;
 272 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
 273 * lock.  The stolen times are measured in units of timebase ticks.
 274 * (Note that the != TB_NIL checks below are purely defensive;
 275 * they should never fail.)
 276 */
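    /*
     * Concretely (illustrative): if the task running the vcore is
     * preempted by the host while the guest is running, that interval
     * goes into vc->stolen_tb and is later charged to each vcpu's
     * dispatch trace log entry; if instead a vcpu's task is preempted
     * while it is in the host kernel servicing a page fault, that
     * interval goes only into that vcpu's busy_stolen.
     */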
 277
 278static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
 279{
 280        unsigned long flags;
 281
 282        spin_lock_irqsave(&vc->stoltb_lock, flags);
 283        vc->preempt_tb = mftb();
 284        spin_unlock_irqrestore(&vc->stoltb_lock, flags);
 285}
 286
 287static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
 288{
 289        unsigned long flags;
 290
 291        spin_lock_irqsave(&vc->stoltb_lock, flags);
 292        if (vc->preempt_tb != TB_NIL) {
 293                vc->stolen_tb += mftb() - vc->preempt_tb;
 294                vc->preempt_tb = TB_NIL;
 295        }
 296        spin_unlock_irqrestore(&vc->stoltb_lock, flags);
 297}
 298
 299static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 300{
 301        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 302        unsigned long flags;
 303
 304        /*
 305         * We can test vc->runner without taking the vcore lock,
 306         * because only this task ever sets vc->runner to this
 307         * vcpu, and once it is set to this vcpu, only this task
 308         * ever sets it to NULL.
 309         */
 310        if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
 311                kvmppc_core_end_stolen(vc);
 312
 313        spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 314        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
 315            vcpu->arch.busy_preempt != TB_NIL) {
 316                vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 317                vcpu->arch.busy_preempt = TB_NIL;
 318        }
 319        spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 320}
 321
 322static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 323{
 324        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 325        unsigned long flags;
 326
 327        if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
 328                kvmppc_core_start_stolen(vc);
 329
 330        spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 331        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 332                vcpu->arch.busy_preempt = mftb();
 333        spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 334}
 335
 336static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 337{
 338        vcpu->arch.pvr = pvr;
 339}
 340
 341/* Dummy value used in computing PCR value below */
 342#define PCR_ARCH_31    (PCR_ARCH_300 << 1)
 343
 344static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 345{
 346        unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
 347        struct kvmppc_vcore *vc = vcpu->arch.vcore;
 348
 349        /* We can (emulate) our own architecture version and anything older */
 350        if (cpu_has_feature(CPU_FTR_ARCH_31))
 351                host_pcr_bit = PCR_ARCH_31;
 352        else if (cpu_has_feature(CPU_FTR_ARCH_300))
 353                host_pcr_bit = PCR_ARCH_300;
 354        else if (cpu_has_feature(CPU_FTR_ARCH_207S))
 355                host_pcr_bit = PCR_ARCH_207;
 356        else if (cpu_has_feature(CPU_FTR_ARCH_206))
 357                host_pcr_bit = PCR_ARCH_206;
 358        else
 359                host_pcr_bit = PCR_ARCH_205;
 360
 361        /* Determine lowest PCR bit needed to run guest in given PVR level */
 362        guest_pcr_bit = host_pcr_bit;
 363        if (arch_compat) {
 364                switch (arch_compat) {
 365                case PVR_ARCH_205:
 366                        guest_pcr_bit = PCR_ARCH_205;
 367                        break;
 368                case PVR_ARCH_206:
 369                case PVR_ARCH_206p:
 370                        guest_pcr_bit = PCR_ARCH_206;
 371                        break;
 372                case PVR_ARCH_207:
 373                        guest_pcr_bit = PCR_ARCH_207;
 374                        break;
 375                case PVR_ARCH_300:
 376                        guest_pcr_bit = PCR_ARCH_300;
 377                        break;
 378                case PVR_ARCH_31:
 379                        guest_pcr_bit = PCR_ARCH_31;
 380                        break;
 381                default:
 382                        return -EINVAL;
 383                }
 384        }
 385
 386        /* Check requested PCR bits don't exceed our capabilities */
 387        if (guest_pcr_bit > host_pcr_bit)
 388                return -EINVAL;
 389
 390        spin_lock(&vc->lock);
 391        vc->arch_compat = arch_compat;
 392        /*
 393         * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
 394         * Also set all reserved PCR bits
 395         */
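            /*
             * Illustration (assuming the usual one-bit-per-level
             * PCR_ARCH_* encoding in asm/reg.h): the PCR_ARCH_* values
             * are consecutive single bits, so the subtraction below
             * yields exactly that range of bits; e.g. a POWER9 host
             * (PCR_ARCH_300) running a v2.06-compat guest ends up with
             * PCR_ARCH_206 | PCR_ARCH_207 set.
             */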
 396        vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
 397        spin_unlock(&vc->lock);
 398
 399        return 0;
 400}
 401
 402static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 403{
 404        int r;
 405
 406        pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
 407        pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
 408               vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
 409        for (r = 0; r < 16; ++r)
 410                pr_err("r%2d = %.16lx  r%d = %.16lx\n",
 411                       r, kvmppc_get_gpr(vcpu, r),
 412                       r+16, kvmppc_get_gpr(vcpu, r+16));
 413        pr_err("ctr = %.16lx  lr  = %.16lx\n",
 414               vcpu->arch.regs.ctr, vcpu->arch.regs.link);
 415        pr_err("srr0 = %.16llx srr1 = %.16llx\n",
 416               vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
 417        pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
 418               vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
 419        pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
 420               vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
 421        pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
 422               vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
 423        pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
 424        pr_err("fault dar = %.16lx dsisr = %.8x\n",
 425               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 426        pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
 427        for (r = 0; r < vcpu->arch.slb_max; ++r)
 428                pr_err("  ESID = %.16llx VSID = %.16llx\n",
 429                       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
 430        pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
 431               vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
 432               vcpu->arch.last_inst);
 433}
 434
 435static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 436{
 437        return kvm_get_vcpu_by_id(kvm, id);
 438}
 439
 440static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
 441{
 442        vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
 443        vpa->yield_count = cpu_to_be32(1);
 444}
 445
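    /*
     * Record a request to change one of the vcpu's registered areas
     * (VPA, SLB shadow buffer or dispatch trace log).  The actual
     * pinning and unpinning of guest pages is deferred to
     * kvmppc_update_vpa().
     */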
 446static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
 447                   unsigned long addr, unsigned long len)
 448{
 449        /* check address is cacheline aligned */
 450        if (addr & (L1_CACHE_BYTES - 1))
 451                return -EINVAL;
 452        spin_lock(&vcpu->arch.vpa_update_lock);
 453        if (v->next_gpa != addr || v->len != len) {
 454                v->next_gpa = addr;
 455                v->len = addr ? len : 0;
 456                v->update_pending = 1;
 457        }
 458        spin_unlock(&vcpu->arch.vpa_update_lock);
 459        return 0;
 460}
 461
 462/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
 463struct reg_vpa {
 464        u32 dummy;
 465        union {
 466                __be16 hword;
 467                __be32 word;
 468        } length;
 469};
 470
 471static int vpa_is_registered(struct kvmppc_vpa *vpap)
 472{
 473        if (vpap->update_pending)
 474                return vpap->next_gpa != 0;
 475        return vpap->pinned_addr != NULL;
 476}
 477
 478static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
 479                                       unsigned long flags,
 480                                       unsigned long vcpuid, unsigned long vpa)
 481{
 482        struct kvm *kvm = vcpu->kvm;
 483        unsigned long len, nb;
 484        void *va;
 485        struct kvm_vcpu *tvcpu;
 486        int err;
 487        int subfunc;
 488        struct kvmppc_vpa *vpap;
 489
 490        tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
 491        if (!tvcpu)
 492                return H_PARAMETER;
 493
 494        subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
 495        if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
 496            subfunc == H_VPA_REG_SLB) {
 497                /* Registering new area - address must be cache-line aligned */
 498                if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
 499                        return H_PARAMETER;
 500
 501                /* convert logical addr to kernel addr and read length */
 502                va = kvmppc_pin_guest_page(kvm, vpa, &nb);
 503                if (va == NULL)
 504                        return H_PARAMETER;
 505                if (subfunc == H_VPA_REG_VPA)
 506                        len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
 507                else
 508                        len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
 509                kvmppc_unpin_guest_page(kvm, va, vpa, false);
 510
 511                /* Check length */
 512                if (len > nb || len < sizeof(struct reg_vpa))
 513                        return H_PARAMETER;
 514        } else {
 515                vpa = 0;
 516                len = 0;
 517        }
 518
 519        err = H_PARAMETER;
 520        vpap = NULL;
 521        spin_lock(&tvcpu->arch.vpa_update_lock);
 522
 523        switch (subfunc) {
 524        case H_VPA_REG_VPA:             /* register VPA */
 525                /*
 526                 * The size of our lppaca is 1kB because of the way we align
 527                 * it for the guest to avoid crossing a 4kB boundary. We only
 528                 * use 640 bytes of the structure though, so we should accept
 529                 * clients that set a size of 640.
 530                 */
 531                BUILD_BUG_ON(sizeof(struct lppaca) != 640);
 532                if (len < sizeof(struct lppaca))
 533                        break;
 534                vpap = &tvcpu->arch.vpa;
 535                err = 0;
 536                break;
 537
 538        case H_VPA_REG_DTL:             /* register DTL */
 539                if (len < sizeof(struct dtl_entry))
 540                        break;
 541                len -= len % sizeof(struct dtl_entry);
 542
 543                /* Check that they have previously registered a VPA */
 544                err = H_RESOURCE;
 545                if (!vpa_is_registered(&tvcpu->arch.vpa))
 546                        break;
 547
 548                vpap = &tvcpu->arch.dtl;
 549                err = 0;
 550                break;
 551
 552        case H_VPA_REG_SLB:             /* register SLB shadow buffer */
 553                /* Check that they have previously registered a VPA */
 554                err = H_RESOURCE;
 555                if (!vpa_is_registered(&tvcpu->arch.vpa))
 556                        break;
 557
 558                vpap = &tvcpu->arch.slb_shadow;
 559                err = 0;
 560                break;
 561
 562        case H_VPA_DEREG_VPA:           /* deregister VPA */
 563                /* Check they don't still have a DTL or SLB buf registered */
 564                err = H_RESOURCE;
 565                if (vpa_is_registered(&tvcpu->arch.dtl) ||
 566                    vpa_is_registered(&tvcpu->arch.slb_shadow))
 567                        break;
 568
 569                vpap = &tvcpu->arch.vpa;
 570                err = 0;
 571                break;
 572
 573        case H_VPA_DEREG_DTL:           /* deregister DTL */
 574                vpap = &tvcpu->arch.dtl;
 575                err = 0;
 576                break;
 577
 578        case H_VPA_DEREG_SLB:           /* deregister SLB shadow buffer */
 579                vpap = &tvcpu->arch.slb_shadow;
 580                err = 0;
 581                break;
 582        }
 583
 584        if (vpap) {
 585                vpap->next_gpa = vpa;
 586                vpap->len = len;
 587                vpap->update_pending = 1;
 588        }
 589
 590        spin_unlock(&tvcpu->arch.vpa_update_lock);
 591
 592        return err;
 593}
 594
 595static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 596{
 597        struct kvm *kvm = vcpu->kvm;
 598        void *va;
 599        unsigned long nb;
 600        unsigned long gpa;
 601
 602        /*
 603         * We need to pin the page pointed to by vpap->next_gpa,
 604         * but we can't call kvmppc_pin_guest_page under the lock
 605         * as it does get_user_pages() and down_read().  So we
 606         * have to drop the lock, pin the page, then get the lock
 607         * again and check that a new area didn't get registered
 608         * in the meantime.
 609         */
 610        for (;;) {
 611                gpa = vpap->next_gpa;
 612                spin_unlock(&vcpu->arch.vpa_update_lock);
 613                va = NULL;
 614                nb = 0;
 615                if (gpa)
 616                        va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 617                spin_lock(&vcpu->arch.vpa_update_lock);
 618                if (gpa == vpap->next_gpa)
 619                        break;
 620                /* sigh... unpin that one and try again */
 621                if (va)
 622                        kvmppc_unpin_guest_page(kvm, va, gpa, false);
 623        }
 624
 625        vpap->update_pending = 0;
 626        if (va && nb < vpap->len) {
 627                /*
 628                 * If it's now too short, it must be that userspace
 629                 * has changed the mappings underlying guest memory,
 630                 * so unregister the region.
 631                 */
 632                kvmppc_unpin_guest_page(kvm, va, gpa, false);
 633                va = NULL;
 634        }
 635        if (vpap->pinned_addr)
 636                kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
 637                                        vpap->dirty);
 638        vpap->gpa = gpa;
 639        vpap->pinned_addr = va;
 640        vpap->dirty = false;
 641        if (va)
 642                vpap->pinned_end = va + vpap->len;
 643}
 644
 645static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 646{
 647        if (!(vcpu->arch.vpa.update_pending ||
 648              vcpu->arch.slb_shadow.update_pending ||
 649              vcpu->arch.dtl.update_pending))
 650                return;
 651
 652        spin_lock(&vcpu->arch.vpa_update_lock);
 653        if (vcpu->arch.vpa.update_pending) {
 654                kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 655                if (vcpu->arch.vpa.pinned_addr)
 656                        init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 657        }
 658        if (vcpu->arch.dtl.update_pending) {
 659                kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 660                vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 661                vcpu->arch.dtl_index = 0;
 662        }
 663        if (vcpu->arch.slb_shadow.update_pending)
 664                kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 665        spin_unlock(&vcpu->arch.vpa_update_lock);
 666}
 667
 668/*
 669 * Return the accumulated stolen time for the vcore up until `now'.
 670 * The caller should hold the vcore lock.
 671 */
 672static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 673{
 674        u64 p;
 675        unsigned long flags;
 676
 677        spin_lock_irqsave(&vc->stoltb_lock, flags);
 678        p = vc->stolen_tb;
 679        if (vc->vcore_state != VCORE_INACTIVE &&
 680            vc->preempt_tb != TB_NIL)
 681                p += now - vc->preempt_tb;
 682        spin_unlock_irqrestore(&vc->stoltb_lock, flags);
 683        return p;
 684}
 685
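    /*
     * Write an entry into the guest's dispatch trace log (if one is
     * registered), charging it with the stolen time accumulated since
     * the previous entry: the vcore stolen time not yet logged for this
     * vcpu plus the vcpu's own busy_stolen time.
     */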
 686static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 687                                    struct kvmppc_vcore *vc)
 688{
 689        struct dtl_entry *dt;
 690        struct lppaca *vpa;
 691        unsigned long stolen;
 692        unsigned long core_stolen;
 693        u64 now;
 694        unsigned long flags;
 695
 696        dt = vcpu->arch.dtl_ptr;
 697        vpa = vcpu->arch.vpa.pinned_addr;
 698        now = mftb();
 699        core_stolen = vcore_stolen_time(vc, now);
 700        stolen = core_stolen - vcpu->arch.stolen_logged;
 701        vcpu->arch.stolen_logged = core_stolen;
 702        spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 703        stolen += vcpu->arch.busy_stolen;
 704        vcpu->arch.busy_stolen = 0;
 705        spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 706        if (!dt || !vpa)
 707                return;
 708        memset(dt, 0, sizeof(struct dtl_entry));
 709        dt->dispatch_reason = 7;
 710        dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
 711        dt->timebase = cpu_to_be64(now + vc->tb_offset);
 712        dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
 713        dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
 714        dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
 715        ++dt;
 716        if (dt == vcpu->arch.dtl.pinned_end)
 717                dt = vcpu->arch.dtl.pinned_addr;
 718        vcpu->arch.dtl_ptr = dt;
 719        /* order writing *dt vs. writing vpa->dtl_idx */
 720        smp_wmb();
 721        vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
 722        vcpu->arch.dtl.dirty = true;
 723}
 724
 725/* See if there is a doorbell interrupt pending for a vcpu */
 726static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
 727{
 728        int thr;
 729        struct kvmppc_vcore *vc;
 730
 731        if (vcpu->arch.doorbell_request)
 732                return true;
 733        /*
 734         * Ensure that the read of vcore->dpdes comes after the read
 735         * of vcpu->doorbell_request.  This barrier matches the
 736         * smp_wmb() in kvmppc_guest_entry_inject().
 737         */
 738        smp_rmb();
 739        vc = vcpu->arch.vcore;
 740        thr = vcpu->vcpu_id - vc->first_vcpuid;
 741        return !!(vc->dpdes & (1 << thr));
 742}
 743
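    /*
     * True if the guest runs with at least ISA v2.07 (POWER8) semantics:
     * either an explicit compat level >= 2.07, or no compat level at all
     * on a v2.07-capable host.
     */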
 744static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
 745{
 746        if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
 747                return true;
 748        if ((!vcpu->arch.vcore->arch_compat) &&
 749            cpu_has_feature(CPU_FTR_ARCH_207S))
 750                return true;
 751        return false;
 752}
 753
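    /*
     * Handle the H_SET_MODE resources implemented here (the CIABR and
     * DAWR0/DAWR1 debug registers); everything else, including the
     * interrupt location (AIL) mode, is punted with H_TOO_HARD.
     */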
 754static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
 755                             unsigned long resource, unsigned long value1,
 756                             unsigned long value2)
 757{
 758        switch (resource) {
 759        case H_SET_MODE_RESOURCE_SET_CIABR:
 760                if (!kvmppc_power8_compatible(vcpu))
 761                        return H_P2;
 762                if (value2)
 763                        return H_P4;
 764                if (mflags)
 765                        return H_UNSUPPORTED_FLAG_START;
 766                /* Guests can't breakpoint the hypervisor */
 767                if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
 768                        return H_P3;
 769                vcpu->arch.ciabr  = value1;
 770                return H_SUCCESS;
 771        case H_SET_MODE_RESOURCE_SET_DAWR0:
 772                if (!kvmppc_power8_compatible(vcpu))
 773                        return H_P2;
 774                if (!ppc_breakpoint_available())
 775                        return H_P2;
 776                if (mflags)
 777                        return H_UNSUPPORTED_FLAG_START;
 778                if (value2 & DABRX_HYP)
 779                        return H_P4;
 780                vcpu->arch.dawr0  = value1;
 781                vcpu->arch.dawrx0 = value2;
 782                return H_SUCCESS;
 783        case H_SET_MODE_RESOURCE_SET_DAWR1:
 784                if (!kvmppc_power8_compatible(vcpu))
 785                        return H_P2;
 786                if (!ppc_breakpoint_available())
 787                        return H_P2;
 788                if (!cpu_has_feature(CPU_FTR_DAWR1))
 789                        return H_P2;
 790                if (!vcpu->kvm->arch.dawr1_enabled)
 791                        return H_FUNCTION;
 792                if (mflags)
 793                        return H_UNSUPPORTED_FLAG_START;
 794                if (value2 & DABRX_HYP)
 795                        return H_P4;
 796                vcpu->arch.dawr1  = value1;
 797                vcpu->arch.dawrx1 = value2;
 798                return H_SUCCESS;
 799        case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
 800                /*
 801                 * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
 802                 * Keep this in synch with kvmppc_filter_guest_lpcr_hv.
 803                 */
 804                if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
 805                                kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
 806                        return H_UNSUPPORTED_FLAG_START;
 807                return H_TOO_HARD;
 808        default:
 809                return H_TOO_HARD;
 810        }
 811}
 812
 813/* Copy guest memory in place - must reside within a single memslot */
 814static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
 815                                  unsigned long len)
 816{
 817        struct kvm_memory_slot *to_memslot = NULL;
 818        struct kvm_memory_slot *from_memslot = NULL;
 819        unsigned long to_addr, from_addr;
 820        int r;
 821
 822        /* Get HPA for from address */
 823        from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
 824        if (!from_memslot)
 825                return -EFAULT;
 826        if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
 827                             << PAGE_SHIFT))
 828                return -EINVAL;
 829        from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
 830        if (kvm_is_error_hva(from_addr))
 831                return -EFAULT;
 832        from_addr |= (from & (PAGE_SIZE - 1));
 833
 834        /* Get HPA for to address */
 835        to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
 836        if (!to_memslot)
 837                return -EFAULT;
 838        if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
 839                           << PAGE_SHIFT))
 840                return -EINVAL;
 841        to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
 842        if (kvm_is_error_hva(to_addr))
 843                return -EFAULT;
 844        to_addr |= (to & (PAGE_SIZE - 1));
 845
 846        /* Perform copy */
 847        r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
 848                             len);
 849        if (r)
 850                return -EFAULT;
 851        mark_page_dirty(kvm, to >> PAGE_SHIFT);
 852        return 0;
 853}
 854
 855static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
 856                               unsigned long dest, unsigned long src)
 857{
 858        u64 pg_sz = SZ_4K;              /* 4K page size */
 859        u64 pg_mask = SZ_4K - 1;
 860        int ret;
 861
 862        /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
 863        if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
 864                      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
 865                return H_PARAMETER;
 866
 867        /* dest (and src if copy_page flag set) must be page aligned */
 868        if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
 869                return H_PARAMETER;
 870
 871        /* zero and/or copy the page as determined by the flags */
 872        if (flags & H_COPY_PAGE) {
 873                ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
 874                if (ret < 0)
 875                        return H_PARAMETER;
 876        } else if (flags & H_ZERO_PAGE) {
 877                ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
 878                if (ret < 0)
 879                        return H_PARAMETER;
 880        }
 881
 882        /* We can ignore the remaining flags */
 883
 884        return H_SUCCESS;
 885}
 886
 887static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
 888{
 889        struct kvmppc_vcore *vcore = target->arch.vcore;
 890
 891        /*
 892         * We expect to have been called by the real mode handler
 893         * (kvmppc_rm_h_confer()) which would have directly returned
 894         * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
 895         * have useful work to do and should not confer) so we don't
 896         * recheck that here.
 897         *
 898         * In the case of the P9 single vcpu per vcore case, the real
 899         * mode handler is not called but no other threads are in the
 900         * source vcore.
 901         */
 902
 903        spin_lock(&vcore->lock);
 904        if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
 905            vcore->vcore_state != VCORE_INACTIVE &&
 906            vcore->runner)
 907                target = vcore->runner;
 908        spin_unlock(&vcore->lock);
 909
 910        return kvm_vcpu_yield_to(target);
 911}
 912
 913static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 914{
 915        int yield_count = 0;
 916        struct lppaca *lppaca;
 917
 918        spin_lock(&vcpu->arch.vpa_update_lock);
 919        lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
 920        if (lppaca)
 921                yield_count = be32_to_cpu(lppaca->yield_count);
 922        spin_unlock(&vcpu->arch.vpa_update_lock);
 923        return yield_count;
 924}
 925
 926/*
 927 * H_RPT_INVALIDATE hcall handler for nested guests.
 928 *
 929 * Handles only nested process-scoped invalidation requests in L0.
 930 */
 931static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
 932{
 933        unsigned long type = kvmppc_get_gpr(vcpu, 6);
 934        unsigned long pid, pg_sizes, start, end;
 935
 936        /*
 937         * The partition-scoped invalidations aren't handled here in L0.
 938         */
 939        if (type & H_RPTI_TYPE_NESTED)
 940                return RESUME_HOST;
 941
 942        pid = kvmppc_get_gpr(vcpu, 4);
 943        pg_sizes = kvmppc_get_gpr(vcpu, 7);
 944        start = kvmppc_get_gpr(vcpu, 8);
 945        end = kvmppc_get_gpr(vcpu, 9);
 946
 947        do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
 948                                type, pg_sizes, start, end);
 949
 950        kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
 951        return RESUME_GUEST;
 952}
 953
 954static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
 955                                    unsigned long id, unsigned long target,
 956                                    unsigned long type, unsigned long pg_sizes,
 957                                    unsigned long start, unsigned long end)
 958{
 959        if (!kvm_is_radix(vcpu->kvm))
 960                return H_UNSUPPORTED;
 961
 962        if (end < start)
 963                return H_P5;
 964
 965        /*
 966         * Partition-scoped invalidation for nested guests.
 967         */
 968        if (type & H_RPTI_TYPE_NESTED) {
 969                if (!nesting_enabled(vcpu->kvm))
 970                        return H_FUNCTION;
 971
 972                /* Support only cores as target */
 973                if (target != H_RPTI_TARGET_CMMU)
 974                        return H_P2;
 975
 976                return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
 977                                               start, end);
 978        }
 979
 980        /*
 981         * Process-scoped invalidation for L1 guests.
 982         */
 983        do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
 984                                type, pg_sizes, start, end);
 985        return H_SUCCESS;
 986}
 987
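    /*
     * Virtual-mode handler for hypercalls from the guest.  Normally the
     * return code is placed in GPR3 and we resume the guest; RESUME_HOST
     * means the hcall is passed on (e.g. to userspace), and a negative
     * errno aborts the run.
     */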
 988int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 989{
 990        struct kvm *kvm = vcpu->kvm;
 991        unsigned long req = kvmppc_get_gpr(vcpu, 3);
 992        unsigned long target, ret = H_SUCCESS;
 993        int yield_count;
 994        struct kvm_vcpu *tvcpu;
 995        int idx, rc;
 996
 997        if (req <= MAX_HCALL_OPCODE &&
 998            !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
 999                return RESUME_HOST;
1000
1001        switch (req) {
1002        case H_REMOVE:
1003                ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
1004                                        kvmppc_get_gpr(vcpu, 5),
1005                                        kvmppc_get_gpr(vcpu, 6));
1006                if (ret == H_TOO_HARD)
1007                        return RESUME_HOST;
1008                break;
1009        case H_ENTER:
1010                ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
1011                                        kvmppc_get_gpr(vcpu, 5),
1012                                        kvmppc_get_gpr(vcpu, 6),
1013                                        kvmppc_get_gpr(vcpu, 7));
1014                if (ret == H_TOO_HARD)
1015                        return RESUME_HOST;
1016                break;
1017        case H_READ:
1018                ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
1019                                        kvmppc_get_gpr(vcpu, 5));
1020                if (ret == H_TOO_HARD)
1021                        return RESUME_HOST;
1022                break;
1023        case H_CLEAR_MOD:
1024                ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
1025                                        kvmppc_get_gpr(vcpu, 5));
1026                if (ret == H_TOO_HARD)
1027                        return RESUME_HOST;
1028                break;
1029        case H_CLEAR_REF:
1030                ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
1031                                        kvmppc_get_gpr(vcpu, 5));
1032                if (ret == H_TOO_HARD)
1033                        return RESUME_HOST;
1034                break;
1035        case H_PROTECT:
1036                ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
1037                                        kvmppc_get_gpr(vcpu, 5),
1038                                        kvmppc_get_gpr(vcpu, 6));
1039                if (ret == H_TOO_HARD)
1040                        return RESUME_HOST;
1041                break;
1042        case H_BULK_REMOVE:
1043                ret = kvmppc_h_bulk_remove(vcpu);
1044                if (ret == H_TOO_HARD)
1045                        return RESUME_HOST;
1046                break;
1047
1048        case H_CEDE:
1049                break;
1050        case H_PROD:
1051                target = kvmppc_get_gpr(vcpu, 4);
1052                tvcpu = kvmppc_find_vcpu(kvm, target);
1053                if (!tvcpu) {
1054                        ret = H_PARAMETER;
1055                        break;
1056                }
1057                tvcpu->arch.prodded = 1;
1058                smp_mb();
1059                if (tvcpu->arch.ceded)
1060                        kvmppc_fast_vcpu_kick_hv(tvcpu);
1061                break;
1062        case H_CONFER:
1063                target = kvmppc_get_gpr(vcpu, 4);
1064                if (target == -1)
1065                        break;
1066                tvcpu = kvmppc_find_vcpu(kvm, target);
1067                if (!tvcpu) {
1068                        ret = H_PARAMETER;
1069                        break;
1070                }
1071                yield_count = kvmppc_get_gpr(vcpu, 5);
1072                if (kvmppc_get_yield_count(tvcpu) != yield_count)
1073                        break;
1074                kvm_arch_vcpu_yield_to(tvcpu);
1075                break;
1076        case H_REGISTER_VPA:
1077                ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
1078                                        kvmppc_get_gpr(vcpu, 5),
1079                                        kvmppc_get_gpr(vcpu, 6));
1080                break;
1081        case H_RTAS:
1082                if (list_empty(&kvm->arch.rtas_tokens))
1083                        return RESUME_HOST;
1084
1085                idx = srcu_read_lock(&kvm->srcu);
1086                rc = kvmppc_rtas_hcall(vcpu);
1087                srcu_read_unlock(&kvm->srcu, idx);
1088
1089                if (rc == -ENOENT)
1090                        return RESUME_HOST;
1091                else if (rc == 0)
1092                        break;
1093
1094                /* Send the error out to userspace via KVM_RUN */
1095                return rc;
1096        case H_LOGICAL_CI_LOAD:
1097                ret = kvmppc_h_logical_ci_load(vcpu);
1098                if (ret == H_TOO_HARD)
1099                        return RESUME_HOST;
1100                break;
1101        case H_LOGICAL_CI_STORE:
1102                ret = kvmppc_h_logical_ci_store(vcpu);
1103                if (ret == H_TOO_HARD)
1104                        return RESUME_HOST;
1105                break;
1106        case H_SET_MODE:
1107                ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
1108                                        kvmppc_get_gpr(vcpu, 5),
1109                                        kvmppc_get_gpr(vcpu, 6),
1110                                        kvmppc_get_gpr(vcpu, 7));
1111                if (ret == H_TOO_HARD)
1112                        return RESUME_HOST;
1113                break;
1114        case H_XIRR:
1115        case H_CPPR:
1116        case H_EOI:
1117        case H_IPI:
1118        case H_IPOLL:
1119        case H_XIRR_X:
1120                if (kvmppc_xics_enabled(vcpu)) {
1121                        if (xics_on_xive()) {
1122                                ret = H_NOT_AVAILABLE;
1123                                return RESUME_GUEST;
1124                        }
1125                        ret = kvmppc_xics_hcall(vcpu, req);
1126                        break;
1127                }
1128                return RESUME_HOST;
1129        case H_SET_DABR:
1130                ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
1131                break;
1132        case H_SET_XDABR:
1133                ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
1134                                                kvmppc_get_gpr(vcpu, 5));
1135                break;
1136#ifdef CONFIG_SPAPR_TCE_IOMMU
1137        case H_GET_TCE:
1138                ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
1139                                                kvmppc_get_gpr(vcpu, 5));
1140                if (ret == H_TOO_HARD)
1141                        return RESUME_HOST;
1142                break;
1143        case H_PUT_TCE:
1144                ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
1145                                                kvmppc_get_gpr(vcpu, 5),
1146                                                kvmppc_get_gpr(vcpu, 6));
1147                if (ret == H_TOO_HARD)
1148                        return RESUME_HOST;
1149                break;
1150        case H_PUT_TCE_INDIRECT:
1151                ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
1152                                                kvmppc_get_gpr(vcpu, 5),
1153                                                kvmppc_get_gpr(vcpu, 6),
1154                                                kvmppc_get_gpr(vcpu, 7));
1155                if (ret == H_TOO_HARD)
1156                        return RESUME_HOST;
1157                break;
1158        case H_STUFF_TCE:
1159                ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
1160                                                kvmppc_get_gpr(vcpu, 5),
1161                                                kvmppc_get_gpr(vcpu, 6),
1162                                                kvmppc_get_gpr(vcpu, 7));
1163                if (ret == H_TOO_HARD)
1164                        return RESUME_HOST;
1165                break;
1166#endif
1167        case H_RANDOM:
1168                if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
1169                        ret = H_HARDWARE;
1170                break;
1171        case H_RPT_INVALIDATE:
1172                ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
1173                                              kvmppc_get_gpr(vcpu, 5),
1174                                              kvmppc_get_gpr(vcpu, 6),
1175                                              kvmppc_get_gpr(vcpu, 7),
1176                                              kvmppc_get_gpr(vcpu, 8),
1177                                              kvmppc_get_gpr(vcpu, 9));
1178                break;
1179
1180        case H_SET_PARTITION_TABLE:
1181                ret = H_FUNCTION;
1182                if (nesting_enabled(kvm))
1183                        ret = kvmhv_set_partition_table(vcpu);
1184                break;
1185        case H_ENTER_NESTED:
1186                ret = H_FUNCTION;
1187                if (!nesting_enabled(kvm))
1188                        break;
1189                ret = kvmhv_enter_nested_guest(vcpu);
1190                if (ret == H_INTERRUPT) {
1191                        kvmppc_set_gpr(vcpu, 3, 0);
1192                        vcpu->arch.hcall_needed = 0;
1193                        return -EINTR;
1194                } else if (ret == H_TOO_HARD) {
1195                        kvmppc_set_gpr(vcpu, 3, 0);
1196                        vcpu->arch.hcall_needed = 0;
1197                        return RESUME_HOST;
1198                }
1199                break;
1200        case H_TLB_INVALIDATE:
1201                ret = H_FUNCTION;
1202                if (nesting_enabled(kvm))
1203                        ret = kvmhv_do_nested_tlbie(vcpu);
1204                break;
1205        case H_COPY_TOFROM_GUEST:
1206                ret = H_FUNCTION;
1207                if (nesting_enabled(kvm))
1208                        ret = kvmhv_copy_tofrom_guest_nested(vcpu);
1209                break;
1210        case H_PAGE_INIT:
1211                ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
1212                                         kvmppc_get_gpr(vcpu, 5),
1213                                         kvmppc_get_gpr(vcpu, 6));
1214                break;
1215        case H_SVM_PAGE_IN:
1216                ret = H_UNSUPPORTED;
1217                if (kvmppc_get_srr1(vcpu) & MSR_S)
1218                        ret = kvmppc_h_svm_page_in(kvm,
1219                                                   kvmppc_get_gpr(vcpu, 4),
1220                                                   kvmppc_get_gpr(vcpu, 5),
1221                                                   kvmppc_get_gpr(vcpu, 6));
1222                break;
1223        case H_SVM_PAGE_OUT:
1224                ret = H_UNSUPPORTED;
1225                if (kvmppc_get_srr1(vcpu) & MSR_S)
1226                        ret = kvmppc_h_svm_page_out(kvm,
1227                                                    kvmppc_get_gpr(vcpu, 4),
1228                                                    kvmppc_get_gpr(vcpu, 5),
1229                                                    kvmppc_get_gpr(vcpu, 6));
1230                break;
1231        case H_SVM_INIT_START:
1232                ret = H_UNSUPPORTED;
1233                if (kvmppc_get_srr1(vcpu) & MSR_S)
1234                        ret = kvmppc_h_svm_init_start(kvm);
1235                break;
1236        case H_SVM_INIT_DONE:
1237                ret = H_UNSUPPORTED;
1238                if (kvmppc_get_srr1(vcpu) & MSR_S)
1239                        ret = kvmppc_h_svm_init_done(kvm);
1240                break;
1241        case H_SVM_INIT_ABORT:
1242                /*
1243                 * Even if that call is made by the Ultravisor, the SRR1 value
1244                 * is the guest context one, with the secure bit clear as it has
1245                 * not yet been secured. So we can't check it here.
1246                 * Instead the kvm->arch.secure_guest flag is checked inside
1247                 * kvmppc_h_svm_init_abort().
1248                 */
1249                ret = kvmppc_h_svm_init_abort(kvm);
1250                break;
1251
1252        default:
1253                return RESUME_HOST;
1254        }
1255        WARN_ON_ONCE(ret == H_TOO_HARD);
1256        kvmppc_set_gpr(vcpu, 3, ret);
1257        vcpu->arch.hcall_needed = 0;
1258        return RESUME_GUEST;
1259}
1260
1261/*
1262 * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
1263 * handlers in book3s_hv_rmhandlers.S.
1264 *
1265 * This has to be done early, not in kvmppc_pseries_do_hcall(), so
1266 * that the cede logic in kvmppc_run_single_vcpu() works properly.
1267 */
1268static void kvmppc_cede(struct kvm_vcpu *vcpu)
1269{
1270        vcpu->arch.shregs.msr |= MSR_EE;
1271        vcpu->arch.ceded = 1;
1272        smp_mb();
1273        if (vcpu->arch.prodded) {
1274                vcpu->arch.prodded = 0;
1275                smp_mb();
1276                vcpu->arch.ceded = 0;
1277        }
1278}
1279
1280static int kvmppc_hcall_impl_hv(unsigned long cmd)
1281{
1282        switch (cmd) {
1283        case H_CEDE:
1284        case H_PROD:
1285        case H_CONFER:
1286        case H_REGISTER_VPA:
1287        case H_SET_MODE:
1288        case H_LOGICAL_CI_LOAD:
1289        case H_LOGICAL_CI_STORE:
1290#ifdef CONFIG_KVM_XICS
1291        case H_XIRR:
1292        case H_CPPR:
1293        case H_EOI:
1294        case H_IPI:
1295        case H_IPOLL:
1296        case H_XIRR_X:
1297#endif
1298        case H_PAGE_INIT:
1299        case H_RPT_INVALIDATE:
1300                return 1;
1301        }
1302
1303        /* See if it's in the real-mode table */
1304        return kvmppc_hcall_impl_hv_realmode(cmd);
1305}
1306
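    /*
     * Check whether the instruction the guest trapped on is KVM's
     * software-breakpoint instruction; if so, exit to userspace with
     * KVM_EXIT_DEBUG, otherwise inject an illegal-instruction program
     * check into the guest.
     */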
1307static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
1308{
1309        u32 last_inst;
1310
1311        if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
1312                                        EMULATE_DONE) {
1313                /*
1314                 * Fetch failed, so return to guest and
1315                 * try executing it again.
1316                 */
1317                return RESUME_GUEST;
1318        }
1319
1320        if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
1321                vcpu->run->exit_reason = KVM_EXIT_DEBUG;
1322                vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
1323                return RESUME_HOST;
1324        } else {
1325                kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1326                return RESUME_GUEST;
1327        }
1328}
1329
1330static void do_nothing(void *x)
1331{
1332}
1333
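    /*
     * Build the DPDES value the guest should see for its (emulated) SMT
     * core: one bit per sibling vcpu with a doorbell pending.  Siblings
     * currently running in the guest are briefly interrupted first so
     * that their vcore->dpdes is up to date.
     */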
1334static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
1335{
1336        int thr, cpu, pcpu, nthreads;
1337        struct kvm_vcpu *v;
1338        unsigned long dpdes;
1339
1340        nthreads = vcpu->kvm->arch.emul_smt_mode;
1341        dpdes = 0;
1342        cpu = vcpu->vcpu_id & ~(nthreads - 1);
1343        for (thr = 0; thr < nthreads; ++thr, ++cpu) {
1344                v = kvmppc_find_vcpu(vcpu->kvm, cpu);
1345                if (!v)
1346                        continue;
1347                /*
1348                 * If the vcpu is currently running on a physical cpu thread,
1349                 * interrupt it in order to pull it out of the guest briefly,
1350                 * which will update its vcore->dpdes value.
1351                 */
1352                pcpu = READ_ONCE(v->cpu);
1353                if (pcpu >= 0)
1354                        smp_call_function_single(pcpu, do_nothing, NULL, 1);
1355                if (kvmppc_doorbell_pending(v))
1356                        dpdes |= 1 << thr;
1357        }
1358        return dpdes;
1359}
1360
1361/*
1362 * On POWER9, emulate doorbell-related instructions in order to
1363 * give the guest the illusion of running on a multi-threaded core.
1364 * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
1365 * and mfspr DPDES.
1366 */
1367static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
1368{
1369        u32 inst, rb, thr;
1370        unsigned long arg;
1371        struct kvm *kvm = vcpu->kvm;
1372        struct kvm_vcpu *tvcpu;
1373
1374        if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
1375                return RESUME_GUEST;
1376        if (get_op(inst) != 31)
1377                return EMULATE_FAIL;
1378        rb = get_rb(inst);
1379        thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1380        switch (get_xop(inst)) {
1381        case OP_31_XOP_MSGSNDP:
1382                arg = kvmppc_get_gpr(vcpu, rb);
1383                if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
1384                        break;
1385                arg &= 0x7f;
1386                if (arg >= kvm->arch.emul_smt_mode)
1387                        break;
1388                tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
1389                if (!tvcpu)
1390                        break;
1391                if (!tvcpu->arch.doorbell_request) {
1392                        tvcpu->arch.doorbell_request = 1;
1393                        kvmppc_fast_vcpu_kick_hv(tvcpu);
1394                }
1395                break;
1396        case OP_31_XOP_MSGCLRP:
1397                arg = kvmppc_get_gpr(vcpu, rb);
1398                if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
1399                        break;
1400                vcpu->arch.vcore->dpdes = 0;
1401                vcpu->arch.doorbell_request = 0;
1402                break;
1403        case OP_31_XOP_MFSPR:
1404                switch (get_sprn(inst)) {
1405                case SPRN_TIR:
1406                        arg = thr;
1407                        break;
1408                case SPRN_DPDES:
1409                        arg = kvmppc_read_dpdes(vcpu);
1410                        break;
1411                default:
1412                        return EMULATE_FAIL;
1413                }
1414                kvmppc_set_gpr(vcpu, get_rt(inst), arg);
1415                break;
1416        default:
1417                return EMULATE_FAIL;
1418        }
1419        kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
1420        return RESUME_GUEST;
1421}
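/*
 * For example, with kvm->arch.emul_smt_mode = 8, a msgsndp issued by
 * vcpu_id 10 (thread 2 of its virtual core) with an RB payload of type
 * PPC_DBELL_SERVER and thread number 5 targets vcpu_id 10 - 2 + 5 = 13:
 * that vcpu's doorbell_request is set and it is kicked so it notices
 * the doorbell promptly.
 */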
1422
1423static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
1424                                 struct task_struct *tsk)
1425{
1426        struct kvm_run *run = vcpu->run;
1427        int r = RESUME_HOST;
1428
1429        vcpu->stat.sum_exits++;
1430
1431        /*
1432         * This can happen if an interrupt occurs in the last stages
1433         * of guest entry or the first stages of guest exit (i.e. after
1434         * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1435         * and before setting it to KVM_GUEST_MODE_HOST_HV).
1436         * That can happen due to a bug, or due to a machine check
1437         * occurring at just the wrong time.
1438         */
1439        if (vcpu->arch.shregs.msr & MSR_HV) {
1440                printk(KERN_EMERG "KVM trap in HV mode!\n");
1441                printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1442                        vcpu->arch.trap, kvmppc_get_pc(vcpu),
1443                        vcpu->arch.shregs.msr);
1444                kvmppc_dump_regs(vcpu);
1445                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1446                run->hw.hardware_exit_reason = vcpu->arch.trap;
1447                return RESUME_HOST;
1448        }
1449        run->exit_reason = KVM_EXIT_UNKNOWN;
1450        run->ready_for_interrupt_injection = 1;
1451        switch (vcpu->arch.trap) {
1452        /* We're good on these - the host merely wanted to get our attention */
1453        case BOOK3S_INTERRUPT_HV_DECREMENTER:
1454                vcpu->stat.dec_exits++;
1455                r = RESUME_GUEST;
1456                break;
1457        case BOOK3S_INTERRUPT_EXTERNAL:
1458        case BOOK3S_INTERRUPT_H_DOORBELL:
1459        case BOOK3S_INTERRUPT_H_VIRT:
1460                vcpu->stat.ext_intr_exits++;
1461                r = RESUME_GUEST;
1462                break;
1463        /* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
1464        case BOOK3S_INTERRUPT_HMI:
1465        case BOOK3S_INTERRUPT_PERFMON:
1466        case BOOK3S_INTERRUPT_SYSTEM_RESET:
1467                r = RESUME_GUEST;
1468                break;
1469        case BOOK3S_INTERRUPT_MACHINE_CHECK: {
1470                static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
1471                                              DEFAULT_RATELIMIT_BURST);
1472                /*
1473                 * Print the MCE event to host console. Ratelimit so the guest
1474                 * can't flood the host log.
1475                 */
1476                if (__ratelimit(&rs))
1477                        machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1478
1479                /*
1480                 * If the guest can do FWNMI, exit to userspace so it can
1481                 * deliver a FWNMI to the guest.
1482                 * Otherwise we synthesize a machine check for the guest
1483                 * so that it knows that the machine check occurred.
1484                 */
1485                if (!vcpu->kvm->arch.fwnmi_enabled) {
1486                        ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
1487                        kvmppc_core_queue_machine_check(vcpu, flags);
1488                        r = RESUME_GUEST;
1489                        break;
1490                }
1491
1492                /* Exit to userspace with KVM_EXIT_NMI as exit reason */
1493                run->exit_reason = KVM_EXIT_NMI;
1494                run->hw.hardware_exit_reason = vcpu->arch.trap;
1495                /* Clear out the old NMI status from run->flags */
1496                run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
1497                /* Now set the NMI status */
1498                if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1499                        run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
1500                else
1501                        run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
1502
1503                r = RESUME_HOST;
1504                break;
1505        }
1506        case BOOK3S_INTERRUPT_PROGRAM:
1507        {
1508                ulong flags;
1509                /*
1510                 * Normally program interrupts are delivered directly
1511                 * to the guest by the hardware, but we can get here
1512                 * as a result of a hypervisor emulation interrupt
1513                 * (e40) getting turned into a 700 by BML RTAS.
1514                 */
1515                flags = vcpu->arch.shregs.msr & 0x1f0000ull;
1516                kvmppc_core_queue_program(vcpu, flags);
1517                r = RESUME_GUEST;
1518                break;
1519        }
1520        case BOOK3S_INTERRUPT_SYSCALL:
1521        {
1522                int i;
1523
1524                if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
1525                        /*
1526                         * Guest userspace executed sc 1. This can only be
1527                         * reached by the P9 path because the old path
1528                         * handles this case in realmode hcall handlers.
1529                         */
1530                        if (!kvmhv_vcpu_is_radix(vcpu)) {
1531                                /*
1532                                 * A guest could be running PR KVM, so this
1533                                 * may be a PR KVM hcall. It must be reflected
1534                                 * to the guest kernel as a sc interrupt.
1535                                 */
1536                                kvmppc_core_queue_syscall(vcpu);
1537                        } else {
1538                                /*
1539                                 * Radix guests can not run PR KVM or nested HV
1540                                 * hash guests which might run PR KVM, so this
1541                                 * is always a privilege fault. Send a program
1542                                 * check to guest kernel.
1543                                 */
1544                                kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
1545                        }
1546                        r = RESUME_GUEST;
1547                        break;
1548                }
1549
1550                /*
1551                 * hcall - gather args and set exit_reason. This will next be
1552                 * handled by kvmppc_pseries_do_hcall which may be able to deal
1553                 * with it and resume guest, or may punt to userspace.
1554                 */
1555                run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
1556                for (i = 0; i < 9; ++i)
1557                        run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
1558                run->exit_reason = KVM_EXIT_PAPR_HCALL;
1559                vcpu->arch.hcall_needed = 1;
1560                r = RESUME_HOST;
1561                break;
1562        }
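        /*
         * Userspace (e.g. QEMU) sees this exit as KVM_EXIT_PAPR_HCALL,
         * services the hypercall, stores the return code in
         * run->papr_hcall.ret and re-enters with KVM_RUN.  A minimal
         * sketch of that loop (illustrative only; do_hcall() and vcpu_fd
         * are assumed to be provided by the VMM):
         *
         *      if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
         *              run->papr_hcall.ret = do_hcall(run->papr_hcall.nr,
         *                                             run->papr_hcall.args);
         *              ioctl(vcpu_fd, KVM_RUN, 0);
         *      }
         */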
1563        /*
1564         * We get these next two if the guest accesses a page which it thinks
1565         * it has mapped but which is not actually present, either because
1566         * it is for an emulated I/O device or because the corresponding
1567         * host page has been paged out.
1568         *
1569         * Any other HDSI/HISI interrupts have been handled already for P7/8
1570         * guests. For POWER9 hash guests not using rmhandlers, basic hash
1571         * fault handling is done here.
1572         */
1573        case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
1574                unsigned long vsid;
1575                long err;
1576
1577                if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
1578                        r = RESUME_GUEST; /* Just retry if it's the canary */
1579                        break;
1580                }
1581
1582                if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
1583                        /*
1584                         * Radix doesn't require anything, and pre-ISAv3.0 hash
1585                         * already attempted to handle this in rmhandlers. The
1586                         * hash fault handling below is v3 only (it uses ASDR
1587                         * via fault_gpa).
1588                         */
1589                        r = RESUME_PAGE_FAULT;
1590                        break;
1591                }
1592
1593                if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
1594                        kvmppc_core_queue_data_storage(vcpu,
1595                                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
1596                        r = RESUME_GUEST;
1597                        break;
1598                }
1599
1600                if (!(vcpu->arch.shregs.msr & MSR_DR))
1601                        vsid = vcpu->kvm->arch.vrma_slb_v;
1602                else
1603                        vsid = vcpu->arch.fault_gpa;
1604
1605                err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
1606                                vsid, vcpu->arch.fault_dsisr, true);
1607                if (err == 0) {
1608                        r = RESUME_GUEST;
1609                } else if (err == -1 || err == -2) {
1610                        r = RESUME_PAGE_FAULT;
1611                } else {
1612                        kvmppc_core_queue_data_storage(vcpu,
1613                                vcpu->arch.fault_dar, err);
1614                        r = RESUME_GUEST;
1615                }
1616                break;
1617        }
1618        case BOOK3S_INTERRUPT_H_INST_STORAGE: {
1619                unsigned long vsid;
1620                long err;
1621
1622                vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1623                vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
1624                        DSISR_SRR1_MATCH_64S;
1625                if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
1626                        /*
1627                         * Radix doesn't require anything, and pre-ISAv3.0 hash
1628                         * already attempted to handle this in rmhandlers. The
1629                         * hash fault handling below is v3 only (it uses ASDR
1630                         * via fault_gpa).
1631                         */
1632                        if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1633                                vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1634                        r = RESUME_PAGE_FAULT;
1635                        break;
1636                }
1637
1638                if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
1639                        kvmppc_core_queue_inst_storage(vcpu,
1640                                vcpu->arch.fault_dsisr);
1641                        r = RESUME_GUEST;
1642                        break;
1643                }
1644
1645                if (!(vcpu->arch.shregs.msr & MSR_IR))
1646                        vsid = vcpu->kvm->arch.vrma_slb_v;
1647                else
1648                        vsid = vcpu->arch.fault_gpa;
1649
1650                err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
1651                                vsid, vcpu->arch.fault_dsisr, false);
1652                if (err == 0) {
1653                        r = RESUME_GUEST;
1654                } else if (err == -1) {
1655                        r = RESUME_PAGE_FAULT;
1656                } else {
1657                        kvmppc_core_queue_inst_storage(vcpu, err);
1658                        r = RESUME_GUEST;
1659                }
1660                break;
1661        }
1662
1663        /*
1664         * This occurs if the guest executes an illegal instruction.
1665         * If guest debug is disabled, generate a program interrupt
1666         * to the guest. If guest debug is enabled, check whether the
1667         * instruction is a software breakpoint instruction and return
1668         * to the guest or the host accordingly.
1669         */
1670        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
1671                if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1672                        vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1673                                swab32(vcpu->arch.emul_inst) :
1674                                vcpu->arch.emul_inst;
1675                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
1676                        r = kvmppc_emulate_debug_inst(vcpu);
1677                } else {
1678                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1679                        r = RESUME_GUEST;
1680                }
1681                break;
1682        /*
1683         * This occurs if the guest (kernel or userspace) does something that
1684         * is prohibited by HFSCR.
1685         * On POWER9, this could be a doorbell instruction that we need
1686         * to emulate.
1687         * Otherwise, we just generate a program interrupt to the guest.
1688         */
1689        case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
1690                r = EMULATE_FAIL;
1691                if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
1692                    cpu_has_feature(CPU_FTR_ARCH_300))
1693                        r = kvmppc_emulate_doorbell_instr(vcpu);
1694                if (r == EMULATE_FAIL) {
1695                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
1696                        r = RESUME_GUEST;
1697                }
1698                break;
1699
1700#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1701        case BOOK3S_INTERRUPT_HV_SOFTPATCH:
1702                /*
1703                 * This occurs for various TM-related instructions that
1704                 * we need to emulate on POWER9 DD2.2.  We have already
1705                 * handled the cases where the guest was in real-suspend
1706                 * mode and was transitioning to transactional state.
1707                 */
1708                r = kvmhv_p9_tm_emulation(vcpu);
1709                break;
1710#endif
1711
1712        case BOOK3S_INTERRUPT_HV_RM_HARD:
1713                r = RESUME_PASSTHROUGH;
1714                break;
1715        default:
1716                kvmppc_dump_regs(vcpu);
1717                printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1718                        vcpu->arch.trap, kvmppc_get_pc(vcpu),
1719                        vcpu->arch.shregs.msr);
1720                run->hw.hardware_exit_reason = vcpu->arch.trap;
1721                r = RESUME_HOST;
1722                break;
1723        }
1724
1725        return r;
1726}
1727
1728static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
1729{
1730        int r;
1731        int srcu_idx;
1732
1733        vcpu->stat.sum_exits++;
1734
1735        /*
1736         * This can happen if an interrupt occurs in the last stages
1737         * of guest entry or the first stages of guest exit (i.e. after
1738         * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
1739         * and before setting it to KVM_GUEST_MODE_HOST_HV).
1740         * That can happen due to a bug, or due to a machine check
1741         * occurring at just the wrong time.
1742         */
1743        if (vcpu->arch.shregs.msr & MSR_HV) {
1744                pr_emerg("KVM trap in HV mode while nested!\n");
1745                pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
1746                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
1747                         vcpu->arch.shregs.msr);
1748                kvmppc_dump_regs(vcpu);
1749                return RESUME_HOST;
1750        }
1751        switch (vcpu->arch.trap) {
1752        /* We're good on these - the host merely wanted to get our attention */
1753        case BOOK3S_INTERRUPT_HV_DECREMENTER:
1754                vcpu->stat.dec_exits++;
1755                r = RESUME_GUEST;
1756                break;
1757        case BOOK3S_INTERRUPT_EXTERNAL:
1758                vcpu->stat.ext_intr_exits++;
1759                r = RESUME_HOST;
1760                break;
1761        case BOOK3S_INTERRUPT_H_DOORBELL:
1762        case BOOK3S_INTERRUPT_H_VIRT:
1763                vcpu->stat.ext_intr_exits++;
1764                r = RESUME_GUEST;
1765                break;
1766        /* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
1767        case BOOK3S_INTERRUPT_HMI:
1768        case BOOK3S_INTERRUPT_PERFMON:
1769        case BOOK3S_INTERRUPT_SYSTEM_RESET:
1770                r = RESUME_GUEST;
1771                break;
1772        case BOOK3S_INTERRUPT_MACHINE_CHECK:
1773        {
1774                static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
1775                                              DEFAULT_RATELIMIT_BURST);
1776                /* Pass the machine check to the L1 guest */
1777                r = RESUME_HOST;
1778                /* Print the MCE event to host console. */
1779                if (__ratelimit(&rs))
1780                        machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1781                break;
1782        }
1783        /*
1784         * We get these next two if the guest accesses a page which it thinks
1785         * it has mapped but which is not actually present, either because
1786         * it is for an emulated I/O device or because the corresponding
1787         * host page has been paged out.
1788         */
1789        case BOOK3S_INTERRUPT_H_DATA_STORAGE:
1790                srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1791                r = kvmhv_nested_page_fault(vcpu);
1792                srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1793                break;
1794        case BOOK3S_INTERRUPT_H_INST_STORAGE:
1795                vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1796                vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
1797                                         DSISR_SRR1_MATCH_64S;
1798                if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1799                        vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1800                srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1801                r = kvmhv_nested_page_fault(vcpu);
1802                srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1803                break;
1804
1805#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1806        case BOOK3S_INTERRUPT_HV_SOFTPATCH:
1807                /*
1808                 * This occurs for various TM-related instructions that
1809                 * we need to emulate on POWER9 DD2.2.  We have already
1810                 * handled the cases where the guest was in real-suspend
1811                 * mode and was transitioning to transactional state.
1812                 */
1813                r = kvmhv_p9_tm_emulation(vcpu);
1814                break;
1815#endif
1816
1817        case BOOK3S_INTERRUPT_HV_RM_HARD:
1818                vcpu->arch.trap = 0;
1819                r = RESUME_GUEST;
1820                if (!xics_on_xive())
1821                        kvmppc_xics_rm_complete(vcpu, 0);
1822                break;
1823        case BOOK3S_INTERRUPT_SYSCALL:
1824        {
1825                unsigned long req = kvmppc_get_gpr(vcpu, 3);
1826
1827                /*
1828                 * The H_RPT_INVALIDATE hcalls issued by nested
1829                 * guests for process-scoped invalidations when
1830                 * GTSE=0, are handled here in L0.
1831                 */
1832                if (req == H_RPT_INVALIDATE) {
1833                        r = kvmppc_nested_h_rpt_invalidate(vcpu);
1834                        break;
1835                }
1836
1837                r = RESUME_HOST;
1838                break;
1839        }
1840        default:
1841                r = RESUME_HOST;
1842                break;
1843        }
1844
1845        return r;
1846}
1847
1848static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
1849                                            struct kvm_sregs *sregs)
1850{
1851        int i;
1852
1853        memset(sregs, 0, sizeof(struct kvm_sregs));
1854        sregs->pvr = vcpu->arch.pvr;
1855        for (i = 0; i < vcpu->arch.slb_max; i++) {
1856                sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1857                sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1858        }
1859
1860        return 0;
1861}
1862
1863static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
1864                                            struct kvm_sregs *sregs)
1865{
1866        int i, j;
1867
1868        /* Only accept the same PVR as the host's, since we can't spoof it */
1869        if (sregs->pvr != vcpu->arch.pvr)
1870                return -EINVAL;
1871
1872        j = 0;
1873        for (i = 0; i < vcpu->arch.slb_nr; i++) {
1874                if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
1875                        vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1876                        vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1877                        ++j;
1878                }
1879        }
1880        vcpu->arch.slb_max = j;
1881
1882        return 0;
1883}
1884
1885/*
1886 * Enforce limits on guest LPCR values based on hardware availability,
1887 * guest configuration, and possibly hypervisor support and security
1888 * concerns.
1889 */
1890unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
1891{
1892        /* LPCR_TC only applies to HPT guests */
1893        if (kvm_is_radix(kvm))
1894                lpcr &= ~LPCR_TC;
1895
1896        /* On POWER8 and above, userspace can modify AIL */
1897        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1898                lpcr &= ~LPCR_AIL;
1899        if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
1900                lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
1901        /*
1902         * On some POWER9s we force AIL off for radix guests to prevent
1903         * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to
1904         * guest, which can result in Q0 translations with LPID=0 PID=PIDR to
1905         * be cached, which the host TLB management does not expect.
1906         */
1907        if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
1908                lpcr &= ~LPCR_AIL;
1909
1910        /*
1911         * On POWER9, allow userspace to enable large decrementer for the
1912         * guest, whether or not the host has it enabled.
1913         */
1914        if (!cpu_has_feature(CPU_FTR_ARCH_300))
1915                lpcr &= ~LPCR_LD;
1916
1917        return lpcr;
1918}
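/*
 * For example, for a radix guest on a POWER9 host with
 * CPU_FTR_P9_RADIX_PREFETCH_BUG, a requested LPCR with TC set and
 * AIL = 3 comes back with both cleared: TC because it only applies to
 * HPT guests, and AIL because of the prefetch workaround above.
 */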
1919
1920static void verify_lpcr(struct kvm *kvm, unsigned long lpcr)
1921{
1922        if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) {
1923                WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n",
1924                          lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr));
1925        }
1926}
1927
1928static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
1929                bool preserve_top32)
1930{
1931        struct kvm *kvm = vcpu->kvm;
1932        struct kvmppc_vcore *vc = vcpu->arch.vcore;
1933        u64 mask;
1934
1935        spin_lock(&vc->lock);
1936
1937        /*
1938         * Userspace can only modify
1939         * DPFD (default prefetch depth), ILE (interrupt little-endian),
1940         * TC (translation control), AIL (alternate interrupt location),
1941         * LD (large decrementer).
1942         * These are subject to restrictions from kvmppc_filter_lpcr_hv().
1943         */
1944        mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD;
1945
1946        /* Broken 32-bit version of LPCR must not clear top bits */
1947        if (preserve_top32)
1948                mask &= 0xFFFFFFFF;
1949
1950        new_lpcr = kvmppc_filter_lpcr_hv(kvm,
1951                        (vc->lpcr & ~mask) | (new_lpcr & mask));
1952
1953        /*
1954         * If ILE (interrupt little-endian) has changed, update the
1955         * MSR_LE bit in the intr_msr for each vcpu in this vcore.
1956         */
1957        if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
1958                struct kvm_vcpu *vcpu;
1959                int i;
1960
1961                kvm_for_each_vcpu(i, vcpu, kvm) {
1962                        if (vcpu->arch.vcore != vc)
1963                                continue;
1964                        if (new_lpcr & LPCR_ILE)
1965                                vcpu->arch.intr_msr |= MSR_LE;
1966                        else
1967                                vcpu->arch.intr_msr &= ~MSR_LE;
1968                }
1969        }
1970
1971        vc->lpcr = new_lpcr;
1972
1973        spin_unlock(&vc->lock);
1974}
1975
1976static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1977                                 union kvmppc_one_reg *val)
1978{
1979        int r = 0;
1980        long int i;
1981
1982        switch (id) {
1983        case KVM_REG_PPC_DEBUG_INST:
1984                *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1985                break;
1986        case KVM_REG_PPC_HIOR:
1987                *val = get_reg_val(id, 0);
1988                break;
1989        case KVM_REG_PPC_DABR:
1990                *val = get_reg_val(id, vcpu->arch.dabr);
1991                break;
1992        case KVM_REG_PPC_DABRX:
1993                *val = get_reg_val(id, vcpu->arch.dabrx);
1994                break;
1995        case KVM_REG_PPC_DSCR:
1996                *val = get_reg_val(id, vcpu->arch.dscr);
1997                break;
1998        case KVM_REG_PPC_PURR:
1999                *val = get_reg_val(id, vcpu->arch.purr);
2000                break;
2001        case KVM_REG_PPC_SPURR:
2002                *val = get_reg_val(id, vcpu->arch.spurr);
2003                break;
2004        case KVM_REG_PPC_AMR:
2005                *val = get_reg_val(id, vcpu->arch.amr);
2006                break;
2007        case KVM_REG_PPC_UAMOR:
2008                *val = get_reg_val(id, vcpu->arch.uamor);
2009                break;
2010        case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
2011                i = id - KVM_REG_PPC_MMCR0;
2012                *val = get_reg_val(id, vcpu->arch.mmcr[i]);
2013                break;
2014        case KVM_REG_PPC_MMCR2:
2015                *val = get_reg_val(id, vcpu->arch.mmcr[2]);
2016                break;
2017        case KVM_REG_PPC_MMCRA:
2018                *val = get_reg_val(id, vcpu->arch.mmcra);
2019                break;
2020        case KVM_REG_PPC_MMCRS:
2021                *val = get_reg_val(id, vcpu->arch.mmcrs);
2022                break;
2023        case KVM_REG_PPC_MMCR3:
2024                *val = get_reg_val(id, vcpu->arch.mmcr[3]);
2025                break;
2026        case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
2027                i = id - KVM_REG_PPC_PMC1;
2028                *val = get_reg_val(id, vcpu->arch.pmc[i]);
2029                break;
2030        case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
2031                i = id - KVM_REG_PPC_SPMC1;
2032                *val = get_reg_val(id, vcpu->arch.spmc[i]);
2033                break;
2034        case KVM_REG_PPC_SIAR:
2035                *val = get_reg_val(id, vcpu->arch.siar);
2036                break;
2037        case KVM_REG_PPC_SDAR:
2038                *val = get_reg_val(id, vcpu->arch.sdar);
2039                break;
2040        case KVM_REG_PPC_SIER:
2041                *val = get_reg_val(id, vcpu->arch.sier[0]);
2042                break;
2043        case KVM_REG_PPC_SIER2:
2044                *val = get_reg_val(id, vcpu->arch.sier[1]);
2045                break;
2046        case KVM_REG_PPC_SIER3:
2047                *val = get_reg_val(id, vcpu->arch.sier[2]);
2048                break;
2049        case KVM_REG_PPC_IAMR:
2050                *val = get_reg_val(id, vcpu->arch.iamr);
2051                break;
2052        case KVM_REG_PPC_PSPB:
2053                *val = get_reg_val(id, vcpu->arch.pspb);
2054                break;
2055        case KVM_REG_PPC_DPDES:
2056                /*
2057                 * On POWER9, where we are emulating msgsndp etc.,
2058                 * we return 1 bit for each vcpu, which can come from
2059                 * either vcore->dpdes or doorbell_request.
2060                 * On POWER8, doorbell_request is 0.
2061                 */
2062                *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
2063                                   vcpu->arch.doorbell_request);
2064                break;
2065        case KVM_REG_PPC_VTB:
2066                *val = get_reg_val(id, vcpu->arch.vcore->vtb);
2067                break;
2068        case KVM_REG_PPC_DAWR:
2069                *val = get_reg_val(id, vcpu->arch.dawr0);
2070                break;
2071        case KVM_REG_PPC_DAWRX:
2072                *val = get_reg_val(id, vcpu->arch.dawrx0);
2073                break;
2074        case KVM_REG_PPC_DAWR1:
2075                *val = get_reg_val(id, vcpu->arch.dawr1);
2076                break;
2077        case KVM_REG_PPC_DAWRX1:
2078                *val = get_reg_val(id, vcpu->arch.dawrx1);
2079                break;
2080        case KVM_REG_PPC_CIABR:
2081                *val = get_reg_val(id, vcpu->arch.ciabr);
2082                break;
2083        case KVM_REG_PPC_CSIGR:
2084                *val = get_reg_val(id, vcpu->arch.csigr);
2085                break;
2086        case KVM_REG_PPC_TACR:
2087                *val = get_reg_val(id, vcpu->arch.tacr);
2088                break;
2089        case KVM_REG_PPC_TCSCR:
2090                *val = get_reg_val(id, vcpu->arch.tcscr);
2091                break;
2092        case KVM_REG_PPC_PID:
2093                *val = get_reg_val(id, vcpu->arch.pid);
2094                break;
2095        case KVM_REG_PPC_ACOP:
2096                *val = get_reg_val(id, vcpu->arch.acop);
2097                break;
2098        case KVM_REG_PPC_WORT:
2099                *val = get_reg_val(id, vcpu->arch.wort);
2100                break;
2101        case KVM_REG_PPC_TIDR:
2102                *val = get_reg_val(id, vcpu->arch.tid);
2103                break;
2104        case KVM_REG_PPC_PSSCR:
2105                *val = get_reg_val(id, vcpu->arch.psscr);
2106                break;
2107        case KVM_REG_PPC_VPA_ADDR:
2108                spin_lock(&vcpu->arch.vpa_update_lock);
2109                *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
2110                spin_unlock(&vcpu->arch.vpa_update_lock);
2111                break;
2112        case KVM_REG_PPC_VPA_SLB:
2113                spin_lock(&vcpu->arch.vpa_update_lock);
2114                val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
2115                val->vpaval.length = vcpu->arch.slb_shadow.len;
2116                spin_unlock(&vcpu->arch.vpa_update_lock);
2117                break;
2118        case KVM_REG_PPC_VPA_DTL:
2119                spin_lock(&vcpu->arch.vpa_update_lock);
2120                val->vpaval.addr = vcpu->arch.dtl.next_gpa;
2121                val->vpaval.length = vcpu->arch.dtl.len;
2122                spin_unlock(&vcpu->arch.vpa_update_lock);
2123                break;
2124        case KVM_REG_PPC_TB_OFFSET:
2125                *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
2126                break;
2127        case KVM_REG_PPC_LPCR:
2128        case KVM_REG_PPC_LPCR_64:
2129                *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
2130                break;
2131        case KVM_REG_PPC_PPR:
2132                *val = get_reg_val(id, vcpu->arch.ppr);
2133                break;
2134#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2135        case KVM_REG_PPC_TFHAR:
2136                *val = get_reg_val(id, vcpu->arch.tfhar);
2137                break;
2138        case KVM_REG_PPC_TFIAR:
2139                *val = get_reg_val(id, vcpu->arch.tfiar);
2140                break;
2141        case KVM_REG_PPC_TEXASR:
2142                *val = get_reg_val(id, vcpu->arch.texasr);
2143                break;
2144        case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
2145                i = id - KVM_REG_PPC_TM_GPR0;
2146                *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
2147                break;
2148        case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
2149        {
2150                int j;
2151                i = id - KVM_REG_PPC_TM_VSR0;
2152                if (i < 32)
2153                        for (j = 0; j < TS_FPRWIDTH; j++)
2154                                val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
2155                else {
2156                        if (cpu_has_feature(CPU_FTR_ALTIVEC))
2157                                val->vval = vcpu->arch.vr_tm.vr[i-32];
2158                        else
2159                                r = -ENXIO;
2160                }
2161                break;
2162        }
2163        case KVM_REG_PPC_TM_CR:
2164                *val = get_reg_val(id, vcpu->arch.cr_tm);
2165                break;
2166        case KVM_REG_PPC_TM_XER:
2167                *val = get_reg_val(id, vcpu->arch.xer_tm);
2168                break;
2169        case KVM_REG_PPC_TM_LR:
2170                *val = get_reg_val(id, vcpu->arch.lr_tm);
2171                break;
2172        case KVM_REG_PPC_TM_CTR:
2173                *val = get_reg_val(id, vcpu->arch.ctr_tm);
2174                break;
2175        case KVM_REG_PPC_TM_FPSCR:
2176                *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
2177                break;
2178        case KVM_REG_PPC_TM_AMR:
2179                *val = get_reg_val(id, vcpu->arch.amr_tm);
2180                break;
2181        case KVM_REG_PPC_TM_PPR:
2182                *val = get_reg_val(id, vcpu->arch.ppr_tm);
2183                break;
2184        case KVM_REG_PPC_TM_VRSAVE:
2185                *val = get_reg_val(id, vcpu->arch.vrsave_tm);
2186                break;
2187        case KVM_REG_PPC_TM_VSCR:
2188                if (cpu_has_feature(CPU_FTR_ALTIVEC))
2189                        *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
2190                else
2191                        r = -ENXIO;
2192                break;
2193        case KVM_REG_PPC_TM_DSCR:
2194                *val = get_reg_val(id, vcpu->arch.dscr_tm);
2195                break;
2196        case KVM_REG_PPC_TM_TAR:
2197                *val = get_reg_val(id, vcpu->arch.tar_tm);
2198                break;
2199#endif
2200        case KVM_REG_PPC_ARCH_COMPAT:
2201                *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
2202                break;
2203        case KVM_REG_PPC_DEC_EXPIRY:
2204                *val = get_reg_val(id, vcpu->arch.dec_expires +
2205                                   vcpu->arch.vcore->tb_offset);
2206                break;
2207        case KVM_REG_PPC_ONLINE:
2208                *val = get_reg_val(id, vcpu->arch.online);
2209                break;
2210        case KVM_REG_PPC_PTCR:
2211                *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
2212                break;
2213        default:
2214                r = -EINVAL;
2215                break;
2216        }
2217
2218        return r;
2219}
2220
2221static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
2222                                 union kvmppc_one_reg *val)
2223{
2224        int r = 0;
2225        long int i;
2226        unsigned long addr, len;
2227
2228        switch (id) {
2229        case KVM_REG_PPC_HIOR:
2230                /* Only allow this to be set to zero */
2231                if (set_reg_val(id, *val))
2232                        r = -EINVAL;
2233                break;
2234        case KVM_REG_PPC_DABR:
2235                vcpu->arch.dabr = set_reg_val(id, *val);
2236                break;
2237        case KVM_REG_PPC_DABRX:
2238                vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
2239                break;
2240        case KVM_REG_PPC_DSCR:
2241                vcpu->arch.dscr = set_reg_val(id, *val);
2242                break;
2243        case KVM_REG_PPC_PURR:
2244                vcpu->arch.purr = set_reg_val(id, *val);
2245                break;
2246        case KVM_REG_PPC_SPURR:
2247                vcpu->arch.spurr = set_reg_val(id, *val);
2248                break;
2249        case KVM_REG_PPC_AMR:
2250                vcpu->arch.amr = set_reg_val(id, *val);
2251                break;
2252        case KVM_REG_PPC_UAMOR:
2253                vcpu->arch.uamor = set_reg_val(id, *val);
2254                break;
2255        case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
2256                i = id - KVM_REG_PPC_MMCR0;
2257                vcpu->arch.mmcr[i] = set_reg_val(id, *val);
2258                break;
2259        case KVM_REG_PPC_MMCR2:
2260                vcpu->arch.mmcr[2] = set_reg_val(id, *val);
2261                break;
2262        case KVM_REG_PPC_MMCRA:
2263                vcpu->arch.mmcra = set_reg_val(id, *val);
2264                break;
2265        case KVM_REG_PPC_MMCRS:
2266                vcpu->arch.mmcrs = set_reg_val(id, *val);
2267                break;
2268        case KVM_REG_PPC_MMCR3:
2269                vcpu->arch.mmcr[3] = set_reg_val(id, *val);
2270                break;
2271        case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
2272                i = id - KVM_REG_PPC_PMC1;
2273                vcpu->arch.pmc[i] = set_reg_val(id, *val);
2274                break;
2275        case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
2276                i = id - KVM_REG_PPC_SPMC1;
2277                vcpu->arch.spmc[i] = set_reg_val(id, *val);
2278                break;
2279        case KVM_REG_PPC_SIAR:
2280                vcpu->arch.siar = set_reg_val(id, *val);
2281                break;
2282        case KVM_REG_PPC_SDAR:
2283                vcpu->arch.sdar = set_reg_val(id, *val);
2284                break;
2285        case KVM_REG_PPC_SIER:
2286                vcpu->arch.sier[0] = set_reg_val(id, *val);
2287                break;
2288        case KVM_REG_PPC_SIER2:
2289                vcpu->arch.sier[1] = set_reg_val(id, *val);
2290                break;
2291        case KVM_REG_PPC_SIER3:
2292                vcpu->arch.sier[2] = set_reg_val(id, *val);
2293                break;
2294        case KVM_REG_PPC_IAMR:
2295                vcpu->arch.iamr = set_reg_val(id, *val);
2296                break;
2297        case KVM_REG_PPC_PSPB:
2298                vcpu->arch.pspb = set_reg_val(id, *val);
2299                break;
2300        case KVM_REG_PPC_DPDES:
2301                vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
2302                break;
2303        case KVM_REG_PPC_VTB:
2304                vcpu->arch.vcore->vtb = set_reg_val(id, *val);
2305                break;
2306        case KVM_REG_PPC_DAWR:
2307                vcpu->arch.dawr0 = set_reg_val(id, *val);
2308                break;
2309        case KVM_REG_PPC_DAWRX:
2310                vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
2311                break;
2312        case KVM_REG_PPC_DAWR1:
2313                vcpu->arch.dawr1 = set_reg_val(id, *val);
2314                break;
2315        case KVM_REG_PPC_DAWRX1:
2316                vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
2317                break;
2318        case KVM_REG_PPC_CIABR:
2319                vcpu->arch.ciabr = set_reg_val(id, *val);
2320                /* Don't allow setting breakpoints in hypervisor code */
2321                if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
2322                        vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
2323                break;
2324        case KVM_REG_PPC_CSIGR:
2325                vcpu->arch.csigr = set_reg_val(id, *val);
2326                break;
2327        case KVM_REG_PPC_TACR:
2328                vcpu->arch.tacr = set_reg_val(id, *val);
2329                break;
2330        case KVM_REG_PPC_TCSCR:
2331                vcpu->arch.tcscr = set_reg_val(id, *val);
2332                break;
2333        case KVM_REG_PPC_PID:
2334                vcpu->arch.pid = set_reg_val(id, *val);
2335                break;
2336        case KVM_REG_PPC_ACOP:
2337                vcpu->arch.acop = set_reg_val(id, *val);
2338                break;
2339        case KVM_REG_PPC_WORT:
2340                vcpu->arch.wort = set_reg_val(id, *val);
2341                break;
2342        case KVM_REG_PPC_TIDR:
2343                vcpu->arch.tid = set_reg_val(id, *val);
2344                break;
2345        case KVM_REG_PPC_PSSCR:
2346                vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2347                break;
2348        case KVM_REG_PPC_VPA_ADDR:
2349                addr = set_reg_val(id, *val);
2350                r = -EINVAL;
2351                if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2352                              vcpu->arch.dtl.next_gpa))
2353                        break;
2354                r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2355                break;
2356        case KVM_REG_PPC_VPA_SLB:
2357                addr = val->vpaval.addr;
2358                len = val->vpaval.length;
2359                r = -EINVAL;
2360                if (addr && !vcpu->arch.vpa.next_gpa)
2361                        break;
2362                r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2363                break;
2364        case KVM_REG_PPC_VPA_DTL:
2365                addr = val->vpaval.addr;
2366                len = val->vpaval.length;
2367                r = -EINVAL;
2368                if (addr && (len < sizeof(struct dtl_entry) ||
2369                             !vcpu->arch.vpa.next_gpa))
2370                        break;
2371                len -= len % sizeof(struct dtl_entry);
2372                r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2373                break;
2374        case KVM_REG_PPC_TB_OFFSET:
2375                /* round up to multiple of 2^24 */
2376                vcpu->arch.vcore->tb_offset =
2377                        ALIGN(set_reg_val(id, *val), 1UL << 24);
2378                break;
2379        case KVM_REG_PPC_LPCR:
2380                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
2381                break;
2382        case KVM_REG_PPC_LPCR_64:
2383                kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
2384                break;
2385        case KVM_REG_PPC_PPR:
2386                vcpu->arch.ppr = set_reg_val(id, *val);
2387                break;
2388#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2389        case KVM_REG_PPC_TFHAR:
2390                vcpu->arch.tfhar = set_reg_val(id, *val);
2391                break;
2392        case KVM_REG_PPC_TFIAR:
2393                vcpu->arch.tfiar = set_reg_val(id, *val);
2394                break;
2395        case KVM_REG_PPC_TEXASR:
2396                vcpu->arch.texasr = set_reg_val(id, *val);
2397                break;
2398        case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
2399                i = id - KVM_REG_PPC_TM_GPR0;
2400                vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2401                break;
2402        case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
2403        {
2404                int j;
2405                i = id - KVM_REG_PPC_TM_VSR0;
2406                if (i < 32)
2407                        for (j = 0; j < TS_FPRWIDTH; j++)
2408                                vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2409                else
2410                        if (cpu_has_feature(CPU_FTR_ALTIVEC))
2411                                vcpu->arch.vr_tm.vr[i-32] = val->vval;
2412                        else
2413                                r = -ENXIO;
2414                break;
2415        }
2416        case KVM_REG_PPC_TM_CR:
2417                vcpu->arch.cr_tm = set_reg_val(id, *val);
2418                break;
2419        case KVM_REG_PPC_TM_XER:
2420                vcpu->arch.xer_tm = set_reg_val(id, *val);
2421                break;
2422        case KVM_REG_PPC_TM_LR:
2423                vcpu->arch.lr_tm = set_reg_val(id, *val);
2424                break;
2425        case KVM_REG_PPC_TM_CTR:
2426                vcpu->arch.ctr_tm = set_reg_val(id, *val);
2427                break;
2428        case KVM_REG_PPC_TM_FPSCR:
2429                vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2430                break;
2431        case KVM_REG_PPC_TM_AMR:
2432                vcpu->arch.amr_tm = set_reg_val(id, *val);
2433                break;
2434        case KVM_REG_PPC_TM_PPR:
2435                vcpu->arch.ppr_tm = set_reg_val(id, *val);
2436                break;
2437        case KVM_REG_PPC_TM_VRSAVE:
2438                vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2439                break;
2440        case KVM_REG_PPC_TM_VSCR:
2441                if (cpu_has_feature(CPU_FTR_ALTIVEC))
2442                        vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
2443                else
2444                        r = -ENXIO;
2445                break;
2446        case KVM_REG_PPC_TM_DSCR:
2447                vcpu->arch.dscr_tm = set_reg_val(id, *val);
2448                break;
2449        case KVM_REG_PPC_TM_TAR:
2450                vcpu->arch.tar_tm = set_reg_val(id, *val);
2451                break;
2452#endif
2453        case KVM_REG_PPC_ARCH_COMPAT:
2454                r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
2455                break;
2456        case KVM_REG_PPC_DEC_EXPIRY:
2457                vcpu->arch.dec_expires = set_reg_val(id, *val) -
2458                        vcpu->arch.vcore->tb_offset;
2459                break;
2460        case KVM_REG_PPC_ONLINE:
2461                i = set_reg_val(id, *val);
2462                if (i && !vcpu->arch.online)
2463                        atomic_inc(&vcpu->arch.vcore->online_count);
2464                else if (!i && vcpu->arch.online)
2465                        atomic_dec(&vcpu->arch.vcore->online_count);
2466                vcpu->arch.online = i;
2467                break;
2468        case KVM_REG_PPC_PTCR:
2469                vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2470                break;
2471        default:
2472                r = -EINVAL;
2473                break;
2474        }
2475
2476        return r;
2477}
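/*
 * The handlers above back the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG
 * vcpu ioctls.  A minimal userspace sketch (illustrative only; vcpu_fd
 * is assumed to come from KVM_CREATE_VCPU) that sets the full 64-bit
 * LPCR, which kvmppc_set_lpcr() then filters as described above:
 *
 *        uint64_t lpcr = ...;
 *        struct kvm_one_reg reg = {
 *                .id   = KVM_REG_PPC_LPCR_64,
 *                .addr = (uintptr_t)&lpcr,
 *        };
 *        if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
 *                perror("KVM_SET_ONE_REG");
 */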
2478
2479/*
2480 * On POWER9, threads are independent and can be in different partitions.
2481 * Therefore we consider each thread to be a subcore.
2482 * There is a restriction that all threads have to be in the same
2483 * MMU mode (radix or HPT), unfortunately, but since we only support
2484 * HPT guests on a HPT host so far, that isn't an impediment yet.
2485 */
2486static int threads_per_vcore(struct kvm *kvm)
2487{
2488        if (cpu_has_feature(CPU_FTR_ARCH_300))
2489                return 1;
2490        return threads_per_subcore;
2491}
2492
2493static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
2494{
2495        struct kvmppc_vcore *vcore;
2496
2497        vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
2498
2499        if (vcore == NULL)
2500                return NULL;
2501
2502        spin_lock_init(&vcore->lock);
2503        spin_lock_init(&vcore->stoltb_lock);
2504        rcuwait_init(&vcore->wait);
2505        vcore->preempt_tb = TB_NIL;
2506        vcore->lpcr = kvm->arch.lpcr;
2507        vcore->first_vcpuid = id;
2508        vcore->kvm = kvm;
2509        INIT_LIST_HEAD(&vcore->preempt_list);
2510
2511        return vcore;
2512}
2513
2514#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2515static struct debugfs_timings_element {
2516        const char *name;
2517        size_t offset;
2518} timings[] = {
2519        {"rm_entry",    offsetof(struct kvm_vcpu, arch.rm_entry)},
2520        {"rm_intr",     offsetof(struct kvm_vcpu, arch.rm_intr)},
2521        {"rm_exit",     offsetof(struct kvm_vcpu, arch.rm_exit)},
2522        {"guest",       offsetof(struct kvm_vcpu, arch.guest_time)},
2523        {"cede",        offsetof(struct kvm_vcpu, arch.cede_time)},
2524};
2525
2526#define N_TIMINGS       (ARRAY_SIZE(timings))
2527
2528struct debugfs_timings_state {
2529        struct kvm_vcpu *vcpu;
2530        unsigned int    buflen;
2531        char            buf[N_TIMINGS * 100];
2532};
2533
2534static int debugfs_timings_open(struct inode *inode, struct file *file)
2535{
2536        struct kvm_vcpu *vcpu = inode->i_private;
2537        struct debugfs_timings_state *p;
2538
2539        p = kzalloc(sizeof(*p), GFP_KERNEL);
2540        if (!p)
2541                return -ENOMEM;
2542
2543        kvm_get_kvm(vcpu->kvm);
2544        p->vcpu = vcpu;
2545        file->private_data = p;
2546
2547        return nonseekable_open(inode, file);
2548}
2549
2550static int debugfs_timings_release(struct inode *inode, struct file *file)
2551{
2552        struct debugfs_timings_state *p = file->private_data;
2553
2554        kvm_put_kvm(p->vcpu->kvm);
2555        kfree(p);
2556        return 0;
2557}
2558
2559static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
2560                                    size_t len, loff_t *ppos)
2561{
2562        struct debugfs_timings_state *p = file->private_data;
2563        struct kvm_vcpu *vcpu = p->vcpu;
2564        char *s, *buf_end;
2565        struct kvmhv_tb_accumulator tb;
2566        u64 count;
2567        loff_t pos;
2568        ssize_t n;
2569        int i, loops;
2570        bool ok;
2571
2572        if (!p->buflen) {
2573                s = p->buf;
2574                buf_end = s + sizeof(p->buf);
2575                for (i = 0; i < N_TIMINGS; ++i) {
2576                        struct kvmhv_tb_accumulator *acc;
2577
2578                        acc = (struct kvmhv_tb_accumulator *)
2579                                ((unsigned long)vcpu + timings[i].offset);
2580                        ok = false;
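                        /*
                         * The accumulator is updated locklessly on the guest
                         * entry/exit path and its seqcount is odd while an
                         * update is in flight.  Retry (up to 1000 times) until
                         * an even seqcount is read and is unchanged across the
                         * copy, so the snapshot in 'tb' is consistent.
                         */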
2581                        for (loops = 0; loops < 1000; ++loops) {
2582                                count = acc->seqcount;
2583                                if (!(count & 1)) {
2584                                        smp_rmb();
2585                                        tb = *acc;
2586                                        smp_rmb();
2587                                        if (count == acc->seqcount) {
2588                                                ok = true;
2589                                                break;
2590                                        }
2591                                }
2592                                udelay(1);
2593                        }
2594                        if (!ok)
2595                                snprintf(s, buf_end - s, "%s: stuck\n",
2596                                        timings[i].name);
2597                        else
2598                                snprintf(s, buf_end - s,
2599                                        "%s: %llu %llu %llu %llu\n",
2600                                        timings[i].name, count / 2,
2601                                        tb_to_ns(tb.tb_total),
2602                                        tb_to_ns(tb.tb_min),
2603                                        tb_to_ns(tb.tb_max));
2604                        s += strlen(s);
2605                }
2606                p->buflen = s - p->buf;
2607        }
2608
2609        pos = *ppos;
2610        if (pos >= p->buflen)
2611                return 0;
2612        if (len > p->buflen - pos)
2613                len = p->buflen - pos;
2614        n = copy_to_user(buf, p->buf + pos, len);
2615        if (n) {
2616                if (n == len)
2617                        return -EFAULT;
2618                len -= n;
2619        }
2620        *ppos = pos + len;
2621        return len;
2622}
2623
2624static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
2625                                     size_t len, loff_t *ppos)
2626{
2627        return -EACCES;
2628}
2629
2630static const struct file_operations debugfs_timings_ops = {
2631        .owner   = THIS_MODULE,
2632        .open    = debugfs_timings_open,
2633        .release = debugfs_timings_release,
2634        .read    = debugfs_timings_read,
2635        .write   = debugfs_timings_write,
2636        .llseek  = generic_file_llseek,
2637};
2638
2639/* Create a debugfs directory for the vcpu */
2640static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2641{
2642        char buf[16];
2643        struct kvm *kvm = vcpu->kvm;
2644
2645        snprintf(buf, sizeof(buf), "vcpu%u", id);
2646        vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
2647        debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
2648                            &debugfs_timings_ops);
2649}
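/*
 * With CONFIG_KVM_BOOK3S_HV_EXIT_TIMING enabled, this creates a vcpu%u
 * directory under the VM's kvm debugfs directory (typically below
 * /sys/kernel/debug/kvm/) containing a read-only "timings" file.  Each
 * line produced by debugfs_timings_read() has the form
 *
 *        <name>: <samples> <total_ns> <min_ns> <max_ns>
 */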
2650
2651#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2652static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2653{
2654}
2655#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
2656
2657static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
2658{
2659        int err;
2660        int core;
2661        struct kvmppc_vcore *vcore;
2662        struct kvm *kvm;
2663        unsigned int id;
2664
2665        kvm = vcpu->kvm;
2666        id = vcpu->vcpu_id;
2667
2668        vcpu->arch.shared = &vcpu->arch.shregs;
2669#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2670        /*
2671         * The shared struct is never shared on HV,
2672         * so we can always use host endianness
2673         */
2674#ifdef __BIG_ENDIAN__
2675        vcpu->arch.shared_big_endian = true;
2676#else
2677        vcpu->arch.shared_big_endian = false;
2678#endif
2679#endif
2680        vcpu->arch.mmcr[0] = MMCR0_FC;
2681        vcpu->arch.ctrl = CTRL_RUNLATCH;
2682        /* default to host PVR, since we can't spoof it */
2683        kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2684        spin_lock_init(&vcpu->arch.vpa_update_lock);
2685        spin_lock_init(&vcpu->arch.tbacct_lock);
2686        vcpu->arch.busy_preempt = TB_NIL;
2687        vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2688
2689        /*
2690         * Set the default HFSCR for the guest from the host value.
2691         * This value is only used on POWER9.
2692         * On POWER9, we want to virtualize the doorbell facility, so we
2693         * don't set the HFSCR_MSGP bit, and that causes those instructions
2694         * to trap and then we emulate them.
2695         */
2696        vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
2697                HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
2698        if (cpu_has_feature(CPU_FTR_HVMODE)) {
2699                vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
2700#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2701                if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
2702                        vcpu->arch.hfscr |= HFSCR_TM;
2703#endif
2704        }
2705        if (cpu_has_feature(CPU_FTR_TM_COMP))
2706                vcpu->arch.hfscr |= HFSCR_TM;
2707
2708        kvmppc_mmu_book3s_hv_init(vcpu);
2709
2710        vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2711
2712        init_waitqueue_head(&vcpu->arch.cpu_run);
2713
2714        mutex_lock(&kvm->lock);
2715        vcore = NULL;
2716        err = -EINVAL;
2717        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
2718                if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2719                        pr_devel("KVM: VCPU ID too high\n");
2720                        core = KVM_MAX_VCORES;
2721                } else {
2722                        BUG_ON(kvm->arch.smt_mode != 1);
2723                        core = kvmppc_pack_vcpu_id(kvm, id);
2724                }
2725        } else {
2726                core = id / kvm->arch.smt_mode;
2727        }
2728        if (core < KVM_MAX_VCORES) {
2729                vcore = kvm->arch.vcores[core];
2730                if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
2731                        pr_devel("KVM: collision on id %u\n", id);
2732                        vcore = NULL;
2733                } else if (!vcore) {
2734                        /*
2735                         * Take mmu_setup_lock for mutual exclusion
2736                         * with kvmppc_update_lpcr().
2737                         */
2738                        err = -ENOMEM;
2739                        vcore = kvmppc_vcore_create(kvm,
2740                                        id & ~(kvm->arch.smt_mode - 1));
2741                        mutex_lock(&kvm->arch.mmu_setup_lock);
2742                        kvm->arch.vcores[core] = vcore;
2743                        kvm->arch.online_vcores++;
2744                        mutex_unlock(&kvm->arch.mmu_setup_lock);
2745                }
2746        }
2747        mutex_unlock(&kvm->lock);
2748
2749        if (!vcore)
2750                return err;
2751
2752        spin_lock(&vcore->lock);
2753        ++vcore->num_threads;
2754        spin_unlock(&vcore->lock);
2755        vcpu->arch.vcore = vcore;
2756        vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
2757        vcpu->arch.thread_cpu = -1;
2758        vcpu->arch.prev_cpu = -1;
2759
2760        vcpu->arch.cpu_type = KVM_CPU_3S_64;
2761        kvmppc_sanity_check(vcpu);
2762
2763        debugfs_vcpu_init(vcpu, id);
2764
2765        return 0;
2766}
2767
2768static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2769                              unsigned long flags)
2770{
2771        int err;
2772        int esmt = 0;
2773
2774        if (flags)
2775                return -EINVAL;
2776        if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2777                return -EINVAL;
2778        if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2779                /*
2780                 * On POWER8 (or POWER7), the threading mode is "strict",
2781                 * so we pack smt_mode vcpus per vcore.
2782                 */
2783                if (smt_mode > threads_per_subcore)
2784                        return -EINVAL;
2785        } else {
2786                /*
2787                 * On POWER9, the threading mode is "loose",
2788                 * so each vcpu gets its own vcore.
2789                 */
2790                esmt = smt_mode;
2791                smt_mode = 1;
2792        }
2793        mutex_lock(&kvm->lock);
2794        err = -EBUSY;
2795        if (!kvm->arch.online_vcores) {
2796                kvm->arch.smt_mode = smt_mode;
2797                kvm->arch.emul_smt_mode = esmt;
2798                err = 0;
2799        }
2800        mutex_unlock(&kvm->lock);
2801
2802        return err;
2803}
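/*
 * Editor's note: a minimal userspace sketch (not part of this file) of how a
 * VMM might request an emulated SMT mode before creating any vcpus.  It
 * assumes the usual KVM_ENABLE_CAP vm ioctl path with KVM_CAP_PPC_SMT, where
 * args[0] is the requested mode and args[1] the flags word checked above;
 * vm_fd is assumed to be an already-created KVM VM file descriptor.
 */
#if 0   /* illustrative userspace code only, never compiled here */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int set_guest_smt_mode(int vm_fd, unsigned long smt_mode)
{
        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_PPC_SMT,
                .args = { smt_mode, 0 },        /* flags must be 0 */
        };

        /* Fails with EBUSY once any vcore is online, matching the code above */
        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0) {
                perror("KVM_ENABLE_CAP(KVM_CAP_PPC_SMT)");
                return -1;
        }
        return 0;
}
#endif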
2804
2805static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2806{
2807        if (vpa->pinned_addr)
2808                kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2809                                        vpa->dirty);
2810}
2811
2812static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
2813{
2814        spin_lock(&vcpu->arch.vpa_update_lock);
2815        unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2816        unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2817        unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2818        spin_unlock(&vcpu->arch.vpa_update_lock);
2819}
2820
2821static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2822{
2823        /* Indicate we want to get back into the guest */
2824        return 1;
2825}
2826
2827static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
2828{
2829        unsigned long dec_nsec, now;
2830
2831        now = get_tb();
2832        if (now > vcpu->arch.dec_expires) {
2833                /* decrementer has already gone negative */
2834                kvmppc_core_queue_dec(vcpu);
2835                kvmppc_core_prepare_to_enter(vcpu);
2836                return;
2837        }
2838        dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
2839        hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2840        vcpu->arch.timer_running = 1;
2841}
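/*
 * Editor's note: an illustrative sketch (not part of this file) of the
 * timebase-to-nanoseconds conversion that tb_to_ns() performs above.  The
 * timebase advances at tb_ticks_per_sec (typically 512 MHz on recent POWER
 * machines), so the ticks remaining until dec_expires scale to nanoseconds
 * roughly as below; the real tb_to_ns() avoids the runtime division by using
 * a pre-computed scale and shift.
 */
#if 0   /* illustrative only */
static inline u64 approx_tb_to_ns(u64 tb_ticks, u64 ticks_per_sec)
{
        return tb_ticks * NSEC_PER_SEC / ticks_per_sec;
}
#endif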
2842
2843extern int __kvmppc_vcore_entry(void);
2844
2845static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2846                                   struct kvm_vcpu *vcpu)
2847{
2848        u64 now;
2849
2850        if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2851                return;
2852        spin_lock_irq(&vcpu->arch.tbacct_lock);
2853        now = mftb();
2854        vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2855                vcpu->arch.stolen_logged;
2856        vcpu->arch.busy_preempt = now;
2857        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2858        spin_unlock_irq(&vcpu->arch.tbacct_lock);
2859        --vc->n_runnable;
2860        WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2861}
2862
2863static int kvmppc_grab_hwthread(int cpu)
2864{
2865        struct paca_struct *tpaca;
2866        long timeout = 10000;
2867
2868        tpaca = paca_ptrs[cpu];
2869
2870        /* Ensure the thread won't go into the kernel if it wakes */
2871        tpaca->kvm_hstate.kvm_vcpu = NULL;
2872        tpaca->kvm_hstate.kvm_vcore = NULL;
2873        tpaca->kvm_hstate.napping = 0;
2874        smp_wmb();
2875        tpaca->kvm_hstate.hwthread_req = 1;
2876
2877        /*
2878         * If the thread is already executing in the kernel (e.g. handling
2879         * a stray interrupt), wait for it to get back to nap mode.
2880         * The smp_mb() is to ensure that our setting of hwthread_req
2881         * is visible before we look at hwthread_state, so if this
2882         * races with the code at system_reset_pSeries and the thread
2883         * misses our setting of hwthread_req, we are sure to see its
2884         * setting of hwthread_state, and vice versa.
2885         */
2886        smp_mb();
2887        while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2888                if (--timeout <= 0) {
2889                        pr_err("KVM: couldn't grab cpu %d\n", cpu);
2890                        return -EBUSY;
2891                }
2892                udelay(1);
2893        }
2894        return 0;
2895}
2896
2897static void kvmppc_release_hwthread(int cpu)
2898{
2899        struct paca_struct *tpaca;
2900
2901        tpaca = paca_ptrs[cpu];
2902        tpaca->kvm_hstate.hwthread_req = 0;
2903        tpaca->kvm_hstate.kvm_vcpu = NULL;
2904        tpaca->kvm_hstate.kvm_vcore = NULL;
2905        tpaca->kvm_hstate.kvm_split_mode = NULL;
2906}
2907
2908static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2909{
2910        struct kvm_nested_guest *nested = vcpu->arch.nested;
2911        cpumask_t *cpu_in_guest;
2912        int i;
2913
2914        cpu = cpu_first_tlb_thread_sibling(cpu);
2915        if (nested) {
2916                cpumask_set_cpu(cpu, &nested->need_tlb_flush);
2917                cpu_in_guest = &nested->cpu_in_guest;
2918        } else {
2919                cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2920                cpu_in_guest = &kvm->arch.cpu_in_guest;
2921        }
2922        /*
2923         * Make sure setting of bit in need_tlb_flush precedes
2924         * testing of cpu_in_guest bits.  The matching barrier on
2925         * the other side is the first smp_mb() in kvmppc_run_core().
2926         */
2927        smp_mb();
2928        for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
2929                                        i += cpu_tlb_thread_sibling_step())
2930                if (cpumask_test_cpu(i, cpu_in_guest))
2931                        smp_call_function_single(i, do_nothing, NULL, 1);
2932}
2933
2934static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2935{
2936        struct kvm_nested_guest *nested = vcpu->arch.nested;
2937        struct kvm *kvm = vcpu->kvm;
2938        int prev_cpu;
2939
2940        if (!cpu_has_feature(CPU_FTR_HVMODE))
2941                return;
2942
2943        if (nested)
2944                prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
2945        else
2946                prev_cpu = vcpu->arch.prev_cpu;
2947
2948        /*
2949         * With radix, the guest can do TLB invalidations itself,
2950         * and it could choose to use the local form (tlbiel) if
2951         * it is invalidating a translation that has only ever been
2952         * used on one vcpu.  However, that doesn't mean it has
2953         * only ever been used on one physical cpu, since vcpus
2954         * can move around between pcpus.  To cope with this, when
2955         * a vcpu moves from one pcpu to another, we need to tell
2956         * any vcpus running on the same core as this vcpu previously
2957         * ran to flush the TLB.  The TLB is shared between threads,
2958         * so we use a single bit in .need_tlb_flush for all 4 threads.
2959         */
2960        if (prev_cpu != pcpu) {
2961                if (prev_cpu >= 0 &&
2962                    cpu_first_tlb_thread_sibling(prev_cpu) !=
2963                    cpu_first_tlb_thread_sibling(pcpu))
2964                        radix_flush_cpu(kvm, prev_cpu, vcpu);
2965                if (nested)
2966                        nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2967                else
2968                        vcpu->arch.prev_cpu = pcpu;
2969        }
2970}
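/*
 * Editor's note: a concrete example of the scenario described in the comment
 * above (the cpu numbers are made up).  Suppose a vcpu last ran on pcpu 13
 * and is now being run on pcpu 21, which belongs to a different TLB-sibling
 * group.  Any tlbiel the guest issues from now on only reaches pcpu 21's
 * core, so translations it created while on pcpu 13 could go stale there.
 * The code above therefore sets need_tlb_flush for pcpu 13's sibling group
 * and kicks any of those threads still in the guest, so the stale TLB
 * entries are flushed before the guest runs on that core again.
 */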
2971
2972static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
2973{
2974        int cpu;
2975        struct paca_struct *tpaca;
2976        struct kvm *kvm = vc->kvm;
2977
2978        cpu = vc->pcpu;
2979        if (vcpu) {
2980                if (vcpu->arch.timer_running) {
2981                        hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2982                        vcpu->arch.timer_running = 0;
2983                }
2984                cpu += vcpu->arch.ptid;
2985                vcpu->cpu = vc->pcpu;
2986                vcpu->arch.thread_cpu = cpu;
2987                cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2988        }
2989        tpaca = paca_ptrs[cpu];
2990        tpaca->kvm_hstate.kvm_vcpu = vcpu;
2991        tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
2992        tpaca->kvm_hstate.fake_suspend = 0;
2993        /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
2994        smp_wmb();
2995        tpaca->kvm_hstate.kvm_vcore = vc;
2996        if (cpu != smp_processor_id())
2997                kvmppc_ipi_thread(cpu);
2998}
2999
3000static void kvmppc_wait_for_nap(int n_threads)
3001{
3002        int cpu = smp_processor_id();
3003        int i, loops;
3004
3005        if (n_threads <= 1)
3006                return;
3007        for (loops = 0; loops < 1000000; ++loops) {
3008                /*
3009                 * Check if all threads are finished.
3010                 * We set the vcore pointer when starting a thread
3011                 * and the thread clears it when finished, so we look
3012                 * for any threads that still have a non-NULL vcore ptr.
3013                 */
3014                for (i = 1; i < n_threads; ++i)
3015                        if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
3016                                break;
3017                if (i == n_threads) {
3018                        HMT_medium();
3019                        return;
3020                }
3021                HMT_low();
3022        }
3023        HMT_medium();
3024        for (i = 1; i < n_threads; ++i)
3025                if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
3026                        pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
3027}
3028
3029/*
3030 * Check that we are on thread 0 and that any other threads in
3031 * this core are off-line.  Then grab the threads so they can't
3032 * enter the kernel.
3033 */
3034static int on_primary_thread(void)
3035{
3036        int cpu = smp_processor_id();
3037        int thr;
3038
3039        /* Are we the primary thread of a subcore? */
3040        if (cpu_thread_in_subcore(cpu))
3041                return 0;
3042
3043        thr = 0;
3044        while (++thr < threads_per_subcore)
3045                if (cpu_online(cpu + thr))
3046                        return 0;
3047
3048        /* Grab all hw threads so they can't go into the kernel */
3049        for (thr = 1; thr < threads_per_subcore; ++thr) {
3050                if (kvmppc_grab_hwthread(cpu + thr)) {
3051                        /* Couldn't grab one; let the others go */
3052                        do {
3053                                kvmppc_release_hwthread(cpu + thr);
3054                        } while (--thr > 0);
3055                        return 0;
3056                }
3057        }
3058        return 1;
3059}
3060
3061/*
3062 * A list of virtual cores for each physical CPU.
3063 * These are vcores that could run but their runner VCPU tasks are
3064 * (or may be) preempted.
3065 */
3066struct preempted_vcore_list {
3067        struct list_head        list;
3068        spinlock_t              lock;
3069};
3070
3071static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
3072
3073static void init_vcore_lists(void)
3074{
3075        int cpu;
3076
3077        for_each_possible_cpu(cpu) {
3078                struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
3079                spin_lock_init(&lp->lock);
3080                INIT_LIST_HEAD(&lp->list);
3081        }
3082}
3083
3084static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
3085{
3086        struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
3087
3088        vc->vcore_state = VCORE_PREEMPT;
3089        vc->pcpu = smp_processor_id();
3090        if (vc->num_threads < threads_per_vcore(vc->kvm)) {
3091                spin_lock(&lp->lock);
3092                list_add_tail(&vc->preempt_list, &lp->list);
3093                spin_unlock(&lp->lock);
3094        }
3095
3096        /* Start accumulating stolen time */
3097        kvmppc_core_start_stolen(vc);
3098}
3099
3100static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
3101{
3102        struct preempted_vcore_list *lp;
3103
3104        kvmppc_core_end_stolen(vc);
3105        if (!list_empty(&vc->preempt_list)) {
3106                lp = &per_cpu(preempted_vcores, vc->pcpu);
3107                spin_lock(&lp->lock);
3108                list_del_init(&vc->preempt_list);
3109                spin_unlock(&lp->lock);
3110        }
3111        vc->vcore_state = VCORE_INACTIVE;
3112}
3113
3114/*
3115 * This stores information about the virtual cores currently
3116 * assigned to a physical core.
3117 */
3118struct core_info {
3119        int             n_subcores;
3120        int             max_subcore_threads;
3121        int             total_threads;
3122        int             subcore_threads[MAX_SUBCORES];
3123        struct kvmppc_vcore *vc[MAX_SUBCORES];
3124};
3125
3126/*
3127 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
3128 * respectively in 2-way micro-threading (split-core) mode on POWER8.
3129 */
3130static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
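/*
 * Editor's note: the same map also covers 4-way micro-threading, where each
 * subcore gets MAX_SMT_THREADS / 4 = 2 threads: subcores 0-3 start at
 * threads 0, 4, 2 and 6 respectively, i.e. they use threads 0-1, 4-5, 2-3
 * and 6-7.
 */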
3131
3132static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
3133{
3134        memset(cip, 0, sizeof(*cip));
3135        cip->n_subcores = 1;
3136        cip->max_subcore_threads = vc->num_threads;
3137        cip->total_threads = vc->num_threads;
3138        cip->subcore_threads[0] = vc->num_threads;
3139        cip->vc[0] = vc;
3140}
3141
3142static bool subcore_config_ok(int n_subcores, int n_threads)
3143{
3144        /*
3145         * POWER9 "SMT4" cores are permanently in what is effectively a 4-way
3146         * split-core mode, with one thread per subcore.
3147         */
3148        if (cpu_has_feature(CPU_FTR_ARCH_300))
3149                return n_subcores <= 4 && n_threads == 1;
3150
3151        /* On POWER8, can only dynamically split if unsplit to begin with */
3152        if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
3153                return false;
3154        if (n_subcores > MAX_SUBCORES)
3155                return false;
3156        if (n_subcores > 1) {
3157                if (!(dynamic_mt_modes & 2))
3158                        n_subcores = 4;
3159                if (n_subcores > 2 && !(dynamic_mt_modes & 4))
3160                        return false;
3161        }
3162
3163        return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
3164}
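/*
 * Editor's note: a worked example of the checks above for a POWER8 core that
 * starts out unsplit (threads_per_subcore == 8) with the default
 * dynamic_mt_modes value of 6 (both 2-way and 4-way splits allowed):
 *
 *   n_subcores == 1: up to MAX_SMT_THREADS (8) threads, no split needed
 *   n_subcores == 2: at most 4 threads per subcore (2-way split)
 *   n_subcores == 3 or 4: at most 2 threads per subcore (4-way split)
 *
 * On POWER9 (CPU_FTR_ARCH_300) only single-thread vcores are accepted, at
 * most 4 of them, since the core behaves as permanently 4-way split.
 */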
3165
3166static void init_vcore_to_run(struct kvmppc_vcore *vc)
3167{
3168        vc->entry_exit_map = 0;
3169        vc->in_guest = 0;
3170        vc->napping_threads = 0;
3171        vc->conferring_threads = 0;
3172        vc->tb_offset_applied = 0;
3173}
3174
3175static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
3176{
3177        int n_threads = vc->num_threads;
3178        int sub;
3179
3180        if (!cpu_has_feature(CPU_FTR_ARCH_207S))
3181                return false;
3182
3183        /* In one_vm_per_core mode, require all vcores to be from the same vm */
3184        if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
3185                return false;
3186
3187        if (n_threads < cip->max_subcore_threads)
3188                n_threads = cip->max_subcore_threads;
3189        if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
3190                return false;
3191        cip->max_subcore_threads = n_threads;
3192
3193        sub = cip->n_subcores;
3194        ++cip->n_subcores;
3195        cip->total_threads += vc->num_threads;
3196        cip->subcore_threads[sub] = vc->num_threads;
3197        cip->vc[sub] = vc;
3198        init_vcore_to_run(vc);
3199        list_del_init(&vc->preempt_list);
3200
3201        return true;
3202}
3203
3204/*
3205 * Work out whether it is possible to piggyback the execution of
3206 * vcore *pvc onto the execution of the other vcores described in *cip.
3207 */
3208static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
3209                          int target_threads)
3210{
3211        if (cip->total_threads + pvc->num_threads > target_threads)
3212                return false;
3213
3214        return can_dynamic_split(pvc, cip);
3215}
3216
3217static void prepare_threads(struct kvmppc_vcore *vc)
3218{
3219        int i;
3220        struct kvm_vcpu *vcpu;
3221
3222        for_each_runnable_thread(i, vcpu, vc) {
3223                if (signal_pending(vcpu->arch.run_task))
3224                        vcpu->arch.ret = -EINTR;
3225                else if (vcpu->arch.vpa.update_pending ||
3226                         vcpu->arch.slb_shadow.update_pending ||
3227                         vcpu->arch.dtl.update_pending)
3228                        vcpu->arch.ret = RESUME_GUEST;
3229                else
3230                        continue;
3231                kvmppc_remove_runnable(vc, vcpu);
3232                wake_up(&vcpu->arch.cpu_run);
3233        }
3234}
3235
3236static void collect_piggybacks(struct core_info *cip, int target_threads)
3237{
3238        struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
3239        struct kvmppc_vcore *pvc, *vcnext;
3240
3241        spin_lock(&lp->lock);
3242        list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
3243                if (!spin_trylock(&pvc->lock))
3244                        continue;
3245                prepare_threads(pvc);
3246                if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
3247                        list_del_init(&pvc->preempt_list);
3248                        if (pvc->runner == NULL) {
3249                                pvc->vcore_state = VCORE_INACTIVE;
3250                                kvmppc_core_end_stolen(pvc);
3251                        }
3252                        spin_unlock(&pvc->lock);
3253                        continue;
3254                }
3255                if (!can_piggyback(pvc, cip, target_threads)) {
3256                        spin_unlock(&pvc->lock);
3257                        continue;
3258                }
3259                kvmppc_core_end_stolen(pvc);
3260                pvc->vcore_state = VCORE_PIGGYBACK;
3261                if (cip->total_threads >= target_threads)
3262                        break;
3263        }
3264        spin_unlock(&lp->lock);
3265}
3266
3267static bool recheck_signals_and_mmu(struct core_info *cip)
3268{
3269        int sub, i;
3270        struct kvm_vcpu *vcpu;
3271        struct kvmppc_vcore *vc;
3272
3273        for (sub = 0; sub < cip->n_subcores; ++sub) {
3274                vc = cip->vc[sub];
3275                if (!vc->kvm->arch.mmu_ready)
3276                        return true;
3277                for_each_runnable_thread(i, vcpu, vc)
3278                        if (signal_pending(vcpu->arch.run_task))
3279                                return true;
3280        }
3281        return false;
3282}
3283
3284static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
3285{
3286        int still_running = 0, i;
3287        u64 now;
3288        long ret;
3289        struct kvm_vcpu *vcpu;
3290
3291        spin_lock(&vc->lock);
3292        now = get_tb();
3293        for_each_runnable_thread(i, vcpu, vc) {
3294                /*
3295                 * It's safe to unlock the vcore in the loop here, because
3296                 * for_each_runnable_thread() is safe against removal of
3297                 * the vcpu, and the vcore state is VCORE_EXITING here,
3298                 * so any vcpus becoming runnable will have their arch.trap
3299                 * set to zero and can't actually run in the guest.
3300                 */
3301                spin_unlock(&vc->lock);
3302                /* cancel pending dec exception if dec is positive */
3303                if (now < vcpu->arch.dec_expires &&
3304                    kvmppc_core_pending_dec(vcpu))
3305                        kvmppc_core_dequeue_dec(vcpu);
3306
3307                trace_kvm_guest_exit(vcpu);
3308
3309                ret = RESUME_GUEST;
3310                if (vcpu->arch.trap)
3311                        ret = kvmppc_handle_exit_hv(vcpu,
3312                                                    vcpu->arch.run_task);
3313
3314                vcpu->arch.ret = ret;
3315                vcpu->arch.trap = 0;
3316
3317                spin_lock(&vc->lock);
3318                if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
3319                        if (vcpu->arch.pending_exceptions)
3320                                kvmppc_core_prepare_to_enter(vcpu);
3321                        if (vcpu->arch.ceded)
3322                                kvmppc_set_timer(vcpu);
3323                        else
3324                                ++still_running;
3325                } else {
3326                        kvmppc_remove_runnable(vc, vcpu);
3327                        wake_up(&vcpu->arch.cpu_run);
3328                }
3329        }
3330        if (!is_master) {
3331                if (still_running > 0) {
3332                        kvmppc_vcore_preempt(vc);
3333                } else if (vc->runner) {
3334                        vc->vcore_state = VCORE_PREEMPT;
3335                        kvmppc_core_start_stolen(vc);
3336                } else {
3337                        vc->vcore_state = VCORE_INACTIVE;
3338                }
3339                if (vc->n_runnable > 0 && vc->runner == NULL) {
3340                        /* make sure there's a candidate runner awake */
3341                        i = -1;
3342                        vcpu = next_runnable_thread(vc, &i);
3343                        wake_up(&vcpu->arch.cpu_run);
3344                }
3345        }
3346        spin_unlock(&vc->lock);
3347}
3348
3349/*
3350 * Clear core from the list of active host cores as we are about to
3351 * enter the guest. Only do this if it is the primary thread of the
3352 * core (not if a subcore) that is entering the guest.
3353 */
3354static inline int kvmppc_clear_host_core(unsigned int cpu)
3355{
3356        int core;
3357
3358        if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3359                return 0;
3360        /*
3361         * The memory barrier can be omitted here because we do a smp_wmb()
3362         * later in kvmppc_start_thread(), and we only need the state to be
3363         * visible to other CPUs after we enter the guest.
3364         */
3365        core = cpu >> threads_shift;
3366        kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
3367        return 0;
3368}
3369
3370/*
3371 * Advertise this core as an active host core since we exited the guest.
3372 * We only need to do this if it is the primary thread of the core that is
3373 * exiting.
3374 */
3375static inline int kvmppc_set_host_core(unsigned int cpu)
3376{
3377        int core;
3378
3379        if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3380                return 0;
3381
3382        /*
3383         * Memory barrier can be omitted here because we do a spin_unlock
3384         * immediately after this which provides the memory barrier.
3385         */
3386        core = cpu >> threads_shift;
3387        kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
3388        return 0;
3389}
3390
3391static void set_irq_happened(int trap)
3392{
3393        switch (trap) {
3394        case BOOK3S_INTERRUPT_EXTERNAL:
3395                local_paca->irq_happened |= PACA_IRQ_EE;
3396                break;
3397        case BOOK3S_INTERRUPT_H_DOORBELL:
3398                local_paca->irq_happened |= PACA_IRQ_DBELL;
3399                break;
3400        case BOOK3S_INTERRUPT_HMI:
3401                local_paca->irq_happened |= PACA_IRQ_HMI;
3402                break;
3403        case BOOK3S_INTERRUPT_SYSTEM_RESET:
3404                replay_system_reset();
3405                break;
3406        }
3407}
3408
3409/*
3410 * Run a set of guest threads on a physical core.
3411 * Called with vc->lock held.
3412 */
3413static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3414{
3415        struct kvm_vcpu *vcpu;
3416        int i;
3417        int srcu_idx;
3418        struct core_info core_info;
3419        struct kvmppc_vcore *pvc;
3420        struct kvm_split_mode split_info, *sip;
3421        int split, subcore_size, active;
3422        int sub;
3423        bool thr0_done;
3424        unsigned long cmd_bit, stat_bit;
3425        int pcpu, thr;
3426        int target_threads;
3427        int controlled_threads;
3428        int trap;
3429        bool is_power8;
3430
3431        if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
3432                return;
3433
3434        /*
3435         * Remove from the list any threads that have a signal pending
3436         * or need a VPA update done
3437         */
3438        prepare_threads(vc);
3439
3440        /* if the runner is no longer runnable, let the caller pick a new one */
3441        if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3442                return;
3443
3444        /*
3445         * Initialize *vc.
3446         */
3447        init_vcore_to_run(vc);
3448        vc->preempt_tb = TB_NIL;
3449
3450        /*
3451         * Number of threads that we will be controlling: the same as
3452         * the number of threads per subcore, except on POWER9,
3453         * where it's 1 because the threads are (mostly) independent.
3454         */
3455        controlled_threads = threads_per_vcore(vc->kvm);
3456
3457        /*
3458         * Make sure we are running on primary threads, and that secondary
3459         * threads are offline.  Also check whether the number of threads in
3460         * this guest is greater than the current system threads per guest.
3461         */
3462        if ((controlled_threads > 1) &&
3463            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
3464                for_each_runnable_thread(i, vcpu, vc) {
3465                        vcpu->arch.ret = -EBUSY;
3466                        kvmppc_remove_runnable(vc, vcpu);
3467                        wake_up(&vcpu->arch.cpu_run);
3468                }
3469                goto out;
3470        }
3471
3472        /*
3473         * See if we could run any other vcores on the physical core
3474         * along with this one.
3475         */
3476        init_core_info(&core_info, vc);
3477        pcpu = smp_processor_id();
3478        target_threads = controlled_threads;
3479        if (target_smt_mode && target_smt_mode < target_threads)
3480                target_threads = target_smt_mode;
3481        if (vc->num_threads < target_threads)
3482                collect_piggybacks(&core_info, target_threads);
3483
3484        /*
3485         * Hard-disable interrupts, and check resched flag and signals.
3486         * If we need to reschedule or deliver a signal, clean up
3487         * and return without going into the guest(s).
3488         * If the mmu_ready flag has been cleared, don't go into the
3489         * guest because that means an HPT resize operation is in progress.
3490         */
3491        local_irq_disable();
3492        hard_irq_disable();
3493        if (lazy_irq_pending() || need_resched() ||
3494            recheck_signals_and_mmu(&core_info)) {
3495                local_irq_enable();
3496                vc->vcore_state = VCORE_INACTIVE;
3497                /* Unlock all except the primary vcore */
3498                for (sub = 1; sub < core_info.n_subcores; ++sub) {
3499                        pvc = core_info.vc[sub];
3500                        /* Put back on to the preempted vcores list */
3501                        kvmppc_vcore_preempt(pvc);
3502                        spin_unlock(&pvc->lock);
3503                }
3504                for (i = 0; i < controlled_threads; ++i)
3505                        kvmppc_release_hwthread(pcpu + i);
3506                return;
3507        }
3508
3509        kvmppc_clear_host_core(pcpu);
3510
3511        /* Decide on micro-threading (split-core) mode */
3512        subcore_size = threads_per_subcore;
3513        cmd_bit = stat_bit = 0;
3514        split = core_info.n_subcores;
3515        sip = NULL;
3516        is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
3517
3518        if (split > 1) {
3519                sip = &split_info;
3520                memset(&split_info, 0, sizeof(split_info));
3521                for (sub = 0; sub < core_info.n_subcores; ++sub)
3522                        split_info.vc[sub] = core_info.vc[sub];
3523
3524                if (is_power8) {
3525                        if (split == 2 && (dynamic_mt_modes & 2)) {
3526                                cmd_bit = HID0_POWER8_1TO2LPAR;
3527                                stat_bit = HID0_POWER8_2LPARMODE;
3528                        } else {
3529                                split = 4;
3530                                cmd_bit = HID0_POWER8_1TO4LPAR;
3531                                stat_bit = HID0_POWER8_4LPARMODE;
3532                        }
3533                        subcore_size = MAX_SMT_THREADS / split;
3534                        split_info.rpr = mfspr(SPRN_RPR);
3535                        split_info.pmmar = mfspr(SPRN_PMMAR);
3536                        split_info.ldbar = mfspr(SPRN_LDBAR);
3537                        split_info.subcore_size = subcore_size;
3538                } else {
3539                        split_info.subcore_size = 1;
3540                }
3541
3542                /* order writes to split_info before kvm_split_mode pointer */
3543                smp_wmb();
3544        }
3545
3546        for (thr = 0; thr < controlled_threads; ++thr) {
3547                struct paca_struct *paca = paca_ptrs[pcpu + thr];
3548
3549                paca->kvm_hstate.napping = 0;
3550                paca->kvm_hstate.kvm_split_mode = sip;
3551        }
3552
3553        /* Initiate micro-threading (split-core) on POWER8 if required */
3554        if (cmd_bit) {
3555                unsigned long hid0 = mfspr(SPRN_HID0);
3556
3557                hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
3558                mb();
3559                mtspr(SPRN_HID0, hid0);
3560                isync();
3561                for (;;) {
3562                        hid0 = mfspr(SPRN_HID0);
3563                        if (hid0 & stat_bit)
3564                                break;
3565                        cpu_relax();
3566                }
3567        }
3568
3569        /*
3570         * On POWER8, set the RWMR register.
3571         * Since it only affects PURR and SPURR, it doesn't affect
3572         * the host, so we don't save/restore the host value.
3573         */
3574        if (is_power8) {
3575                unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
3576                int n_online = atomic_read(&vc->online_count);
3577
3578                /*
3579                 * Use the 8-thread value if we're doing split-core
3580                 * or if the vcore's online count looks bogus.
3581                 */
3582                if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
3583                    n_online >= 1 && n_online <= MAX_SMT_THREADS)
3584                        rwmr_val = p8_rwmr_values[n_online];
3585                mtspr(SPRN_RWMR, rwmr_val);
3586        }
3587
3588        /* Start all the threads */
3589        active = 0;
3590        for (sub = 0; sub < core_info.n_subcores; ++sub) {
3591                thr = is_power8 ? subcore_thread_map[sub] : sub;
3592                thr0_done = false;
3593                active |= 1 << thr;
3594                pvc = core_info.vc[sub];
3595                pvc->pcpu = pcpu + thr;
3596                for_each_runnable_thread(i, vcpu, pvc) {
3597                        kvmppc_start_thread(vcpu, pvc);
3598                        kvmppc_create_dtl_entry(vcpu, pvc);
3599                        trace_kvm_guest_enter(vcpu);
3600                        if (!vcpu->arch.ptid)
3601                                thr0_done = true;
3602                        active |= 1 << (thr + vcpu->arch.ptid);
3603                }
3604                /*
3605                 * We need to start the first thread of each subcore
3606                 * even if it doesn't have a vcpu.
3607                 */
3608                if (!thr0_done)
3609                        kvmppc_start_thread(NULL, pvc);
3610        }
3611
3612        /*
3613         * Ensure that split_info.do_nap is set after setting
3614         * the vcore pointer in the PACA of the secondaries.
3615         */
3616        smp_mb();
3617
3618        /*
3619         * When doing micro-threading, poke the inactive threads as well.
3620         * This gets them to the nap instruction after kvm_do_nap,
3621         * which reduces the time taken to unsplit later.
3622         */
3623        if (cmd_bit) {
3624                split_info.do_nap = 1;  /* ask secondaries to nap when done */
3625                for (thr = 1; thr < threads_per_subcore; ++thr)
3626                        if (!(active & (1 << thr)))
3627                                kvmppc_ipi_thread(pcpu + thr);
3628        }
3629
3630        vc->vcore_state = VCORE_RUNNING;
3631        preempt_disable();
3632
3633        trace_kvmppc_run_core(vc, 0);
3634
3635        for (sub = 0; sub < core_info.n_subcores; ++sub)
3636                spin_unlock(&core_info.vc[sub]->lock);
3637
3638        guest_enter_irqoff();
3639
3640        srcu_idx = srcu_read_lock(&vc->kvm->srcu);
3641
3642        this_cpu_disable_ftrace();
3643
3644        /*
3645         * Interrupts will be enabled once we get into the guest,
3646         * so tell lockdep that we're about to enable interrupts.
3647         */
3648        trace_hardirqs_on();
3649
3650        trap = __kvmppc_vcore_entry();
3651
3652        trace_hardirqs_off();
3653
3654        this_cpu_enable_ftrace();
3655
3656        srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
3657
3658        set_irq_happened(trap);
3659
3660        spin_lock(&vc->lock);
3661        /* prevent other vcpu threads from doing kvmppc_start_thread() now */
3662        vc->vcore_state = VCORE_EXITING;
3663
3664        /* wait for secondary threads to finish writing their state to memory */
3665        kvmppc_wait_for_nap(controlled_threads);
3666
3667        /* Return to whole-core mode if we split the core earlier */
3668        if (cmd_bit) {
3669                unsigned long hid0 = mfspr(SPRN_HID0);
3670                unsigned long loops = 0;
3671
3672                hid0 &= ~HID0_POWER8_DYNLPARDIS;
3673                stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
3674                mb();
3675                mtspr(SPRN_HID0, hid0);
3676                isync();
3677                for (;;) {
3678                        hid0 = mfspr(SPRN_HID0);
3679                        if (!(hid0 & stat_bit))
3680                                break;
3681                        cpu_relax();
3682                        ++loops;
3683                }
3684                split_info.do_nap = 0;
3685        }
3686
3687        kvmppc_set_host_core(pcpu);
3688
3689        guest_exit_irqoff();
3690
3691        local_irq_enable();
3692
3693        /* Let secondaries go back to the offline loop */
3694        for (i = 0; i < controlled_threads; ++i) {
3695                kvmppc_release_hwthread(pcpu + i);
3696                if (sip && sip->napped[i])
3697                        kvmppc_ipi_thread(pcpu + i);
3698                cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
3699        }
3700
3701        spin_unlock(&vc->lock);
3702
3703        /* make sure updates to secondary vcpu structs are visible now */
3704        smp_mb();
3705
3706        preempt_enable();
3707
3708        for (sub = 0; sub < core_info.n_subcores; ++sub) {
3709                pvc = core_info.vc[sub];
3710                post_guest_process(pvc, pvc == vc);
3711        }
3712
3713        spin_lock(&vc->lock);
3714
3715 out:
3716        vc->vcore_state = VCORE_INACTIVE;
3717        trace_kvmppc_run_core(vc, 1);
3718}
3719
3720static void load_spr_state(struct kvm_vcpu *vcpu)
3721{
3722        mtspr(SPRN_DSCR, vcpu->arch.dscr);
3723        mtspr(SPRN_IAMR, vcpu->arch.iamr);
3724        mtspr(SPRN_PSPB, vcpu->arch.pspb);
3725        mtspr(SPRN_FSCR, vcpu->arch.fscr);
3726        mtspr(SPRN_TAR, vcpu->arch.tar);
3727        mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
3728        mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
3729        mtspr(SPRN_BESCR, vcpu->arch.bescr);
3730        mtspr(SPRN_WORT, vcpu->arch.wort);
3731        mtspr(SPRN_TIDR, vcpu->arch.tid);
3732        mtspr(SPRN_AMR, vcpu->arch.amr);
3733        mtspr(SPRN_UAMOR, vcpu->arch.uamor);
3734
3735        /*
3736         * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
3737         * clear (or hstate set appropriately to catch those registers
3738         * being clobbered if we take a MCE or SRESET), so those are done
3739         * later.
3740         */
3741
3742        if (!(vcpu->arch.ctrl & 1))
3743                mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
3744}
3745
3746static void store_spr_state(struct kvm_vcpu *vcpu)
3747{
3748        vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
3749
3750        vcpu->arch.iamr = mfspr(SPRN_IAMR);
3751        vcpu->arch.pspb = mfspr(SPRN_PSPB);
3752        vcpu->arch.fscr = mfspr(SPRN_FSCR);
3753        vcpu->arch.tar = mfspr(SPRN_TAR);
3754        vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
3755        vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
3756        vcpu->arch.bescr = mfspr(SPRN_BESCR);
3757        vcpu->arch.wort = mfspr(SPRN_WORT);
3758        vcpu->arch.tid = mfspr(SPRN_TIDR);
3759        vcpu->arch.amr = mfspr(SPRN_AMR);
3760        vcpu->arch.uamor = mfspr(SPRN_UAMOR);
3761        vcpu->arch.dscr = mfspr(SPRN_DSCR);
3762}
3763
3764/*
3765 * Privileged (non-hypervisor) host registers to save.
3766 */
3767struct p9_host_os_sprs {
3768        unsigned long dscr;
3769        unsigned long tidr;
3770        unsigned long iamr;
3771        unsigned long amr;
3772        unsigned long fscr;
3773};
3774
3775static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
3776{
3777        host_os_sprs->dscr = mfspr(SPRN_DSCR);
3778        host_os_sprs->tidr = mfspr(SPRN_TIDR);
3779        host_os_sprs->iamr = mfspr(SPRN_IAMR);
3780        host_os_sprs->amr = mfspr(SPRN_AMR);
3781        host_os_sprs->fscr = mfspr(SPRN_FSCR);
3782}
3783
3784/* vcpu guest regs must already be saved */
3785static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
3786                                    struct p9_host_os_sprs *host_os_sprs)
3787{
3788        mtspr(SPRN_PSPB, 0);
3789        mtspr(SPRN_WORT, 0);
3790        mtspr(SPRN_UAMOR, 0);
3791
3792        mtspr(SPRN_DSCR, host_os_sprs->dscr);
3793        mtspr(SPRN_TIDR, host_os_sprs->tidr);
3794        mtspr(SPRN_IAMR, host_os_sprs->iamr);
3795
3796        if (host_os_sprs->amr != vcpu->arch.amr)
3797                mtspr(SPRN_AMR, host_os_sprs->amr);
3798
3799        if (host_os_sprs->fscr != vcpu->arch.fscr)
3800                mtspr(SPRN_FSCR, host_os_sprs->fscr);
3801
3802        /* Save guest CTRL register, set runlatch to 1 */
3803        if (!(vcpu->arch.ctrl & 1))
3804                mtspr(SPRN_CTRLT, 1);
3805}
3806
3807static inline bool hcall_is_xics(unsigned long req)
3808{
3809        return req == H_EOI || req == H_CPPR || req == H_IPI ||
3810                req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
3811}
3812
3813/*
3814 * Guest entry for POWER9 and later CPUs.
3815 */
3816static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3817                         unsigned long lpcr)
3818{
3819        struct kvmppc_vcore *vc = vcpu->arch.vcore;
3820        struct p9_host_os_sprs host_os_sprs;
3821        s64 dec;
3822        u64 tb;
3823        int trap, save_pmu;
3824
3825        WARN_ON_ONCE(vcpu->arch.ceded);
3826
3827        dec = mfspr(SPRN_DEC);
3828        tb = mftb();
3829        if (dec < 0)
3830                return BOOK3S_INTERRUPT_HV_DECREMENTER;
3831        local_paca->kvm_hstate.dec_expires = dec + tb;
3832        if (local_paca->kvm_hstate.dec_expires < time_limit)
3833                time_limit = local_paca->kvm_hstate.dec_expires;
3834
3835        save_p9_host_os_sprs(&host_os_sprs);
3836
3837        kvmhv_save_host_pmu();          /* saves it to PACA kvm_hstate */
3838
3839        kvmppc_subcore_enter_guest();
3840
3841        vc->entry_exit_map = 1;
3842        vc->in_guest = 1;
3843
3844        if (vcpu->arch.vpa.pinned_addr) {
3845                struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3846                u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3847                lp->yield_count = cpu_to_be32(yield_count);
3848                vcpu->arch.vpa.dirty = 1;
3849        }
3850
3851        if (cpu_has_feature(CPU_FTR_TM) ||
3852            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3853                kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3854
3855        kvmhv_load_guest_pmu(vcpu);
3856
3857        msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3858        load_fp_state(&vcpu->arch.fp);
3859#ifdef CONFIG_ALTIVEC
3860        load_vr_state(&vcpu->arch.vr);
3861#endif
3862        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
3863
3864        load_spr_state(vcpu);
3865
3866        /*
3867         * When setting DEC we must account for an NMI-driven irq_work_raise
3868         * racing with it: if an NMI hits right as we switch into guest mode
3869         * and sets pending work and sets DEC, that DEC value will apply to
3870         * the guest and not bring us back to the host.
3871         *
3872         * Could irq_work_raise check a flag (or possibly LPCR[HDICE], for
3873         * example) and set HDEC to 1? That wouldn't solve the nested HV
3874         * case, which needs to abort the hcall or zero the time limit.
3875         *
3876         * XXX: Another day's problem.
3877         */
3878        mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
3879
3880        if (kvmhv_on_pseries()) {
3881                /*
3882                 * We need to save and restore the guest visible part of the
3883                 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
3884                 * doesn't do this for us. This is only required on pseries;
3885                 * otherwise it is done in kvmhv_vcpu_entry_p9() below.
3886                 */
3887                unsigned long host_psscr;
3888                /* call our hypervisor to load up HV regs and go */
3889                struct hv_guest_state hvregs;
3890
3891                host_psscr = mfspr(SPRN_PSSCR_PR);
3892                mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
3893                kvmhv_save_hv_regs(vcpu, &hvregs);
3894                hvregs.lpcr = lpcr;
3895                vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
3896                hvregs.version = HV_GUEST_STATE_VERSION;
3897                if (vcpu->arch.nested) {
3898                        hvregs.lpid = vcpu->arch.nested->shadow_lpid;
3899                        hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
3900                } else {
3901                        hvregs.lpid = vcpu->kvm->arch.lpid;
3902                        hvregs.vcpu_token = vcpu->vcpu_id;
3903                }
3904                hvregs.hdec_expiry = time_limit;
3905                mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
3906                mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
3907                trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
3908                                          __pa(&vcpu->arch.regs));
3909                kvmhv_restore_hv_return_state(vcpu, &hvregs);
3910                vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
3911                vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
3912                vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
3913                vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
3914                mtspr(SPRN_PSSCR_PR, host_psscr);
3915
3916                /* H_CEDE has to be handled now, not later */
3917                if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3918                    kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
3919                        kvmppc_cede(vcpu);
3920                        kvmppc_set_gpr(vcpu, 3, 0);
3921                        trap = 0;
3922                }
3923        } else {
3924                kvmppc_xive_push_vcpu(vcpu);
3925                trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
3926                if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3927                    !(vcpu->arch.shregs.msr & MSR_PR)) {
3928                        unsigned long req = kvmppc_get_gpr(vcpu, 3);
3929
3930                        /* H_CEDE has to be handled now, not later */
3931                        if (req == H_CEDE) {
3932                                kvmppc_cede(vcpu);
3933                                kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
3934                                kvmppc_set_gpr(vcpu, 3, 0);
3935                                trap = 0;
3936
3937                        /* XICS hcalls must be handled before xive is pulled */
3938                        } else if (hcall_is_xics(req)) {
3939                                int ret;
3940
3941                                ret = kvmppc_xive_xics_hcall(vcpu, req);
3942                                if (ret != H_TOO_HARD) {
3943                                        kvmppc_set_gpr(vcpu, 3, ret);
3944                                        trap = 0;
3945                                }
3946                        }
3947                }
3948                kvmppc_xive_pull_vcpu(vcpu);
3949
3950                if (kvm_is_radix(vcpu->kvm))
3951                        vcpu->arch.slb_max = 0;
3952        }
3953
3954        dec = mfspr(SPRN_DEC);
3955        if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
3956                dec = (s32) dec;
3957        tb = mftb();
3958        vcpu->arch.dec_expires = dec + tb;
3959        vcpu->cpu = -1;
3960        vcpu->arch.thread_cpu = -1;
3961
3962        store_spr_state(vcpu);
3963
3964        restore_p9_host_os_sprs(vcpu, &host_os_sprs);
3965
3966        msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3967        store_fp_state(&vcpu->arch.fp);
3968#ifdef CONFIG_ALTIVEC
3969        store_vr_state(&vcpu->arch.vr);
3970#endif
3971        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
3972
3973        if (cpu_has_feature(CPU_FTR_TM) ||
3974            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3975                kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3976
3977        save_pmu = 1;
3978        if (vcpu->arch.vpa.pinned_addr) {
3979                struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3980                u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3981                lp->yield_count = cpu_to_be32(yield_count);
3982                vcpu->arch.vpa.dirty = 1;
3983                save_pmu = lp->pmcregs_in_use;
3984        }
3985        /* Must save pmu if this guest is capable of running nested guests */
3986        save_pmu |= nesting_enabled(vcpu->kvm);
3987
3988        kvmhv_save_guest_pmu(vcpu, save_pmu);
3989
3990        vc->entry_exit_map = 0x101;
3991        vc->in_guest = 0;
3992
3993        mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
3994        /* We may have raced with new irq work */
3995        if (test_irq_work_pending())
3996                set_dec(1);
3997        mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
3998
3999        kvmhv_load_host_pmu();
4000
4001        kvmppc_subcore_exit_guest();
4002
4003        return trap;
4004}
4005
4006/*
4007 * Wait for some other vcpu thread to execute us, and
4008 * wake us up when we need to handle something in the host.
4009 */
4010static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
4011                                 struct kvm_vcpu *vcpu, int wait_state)
4012{
4013        DEFINE_WAIT(wait);
4014
4015        prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
4016        if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4017                spin_unlock(&vc->lock);
4018                schedule();
4019                spin_lock(&vc->lock);
4020        }
4021        finish_wait(&vcpu->arch.cpu_run, &wait);
4022}
4023
4024static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
4025{
4026        if (!halt_poll_ns_grow)
4027                return;
4028
4029        vc->halt_poll_ns *= halt_poll_ns_grow;
4030        if (vc->halt_poll_ns < halt_poll_ns_grow_start)
4031                vc->halt_poll_ns = halt_poll_ns_grow_start;
4032}
4033
4034static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
4035{
4036        if (halt_poll_ns_shrink == 0)
4037                vc->halt_poll_ns = 0;
4038        else
4039                vc->halt_poll_ns /= halt_poll_ns_shrink;
4040}
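/*
 * Editor's note: a worked example of the two helpers above, assuming
 * illustrative module-parameter values halt_poll_ns_grow = 2 and
 * halt_poll_ns_grow_start = 10000.  A vcore that keeps finding work while
 * polling grows its window 0 -> 10000 -> 20000 -> 40000 ns and so on (capped
 * elsewhere against halt_poll_ns), while an idle vcore either halves its
 * window each time or, if halt_poll_ns_shrink is 0, drops straight back to 0.
 */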
4041
4042#ifdef CONFIG_KVM_XICS
4043static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
4044{
4045        if (!xics_on_xive())
4046                return false;
4047        return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
4048                vcpu->arch.xive_saved_state.cppr;
4049}
4050#else
4051static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
4052{
4053        return false;
4054}
4055#endif /* CONFIG_KVM_XICS */
4056
4057static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
4058{
4059        if (vcpu->arch.pending_exceptions