#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include <asm/ftrace.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
#include <asm/plpar_wrappers.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

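/* Used to indicate that a guest page fault needs to be handled */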
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
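/* Used to indicate that a guest passthrough interrupt needs to be handled */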
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

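/* Used as a "null" value for timebase values */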
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");

#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");

static inline bool nesting_enabled(struct kvm *kvm)
{
	return kvm->arch.nested_enable && kvm_is_radix(kvm);
}

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

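/*
 * RWMR values for POWER8.  These control the rate at which PURR
 * and SPURR count and should be set according to the number of
 * online threads in the vcore being run.
 */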
#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL

static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_2THREAD,
	RWMR_RPA_P8_3THREAD,
	RWMR_RPA_P8_4THREAD,
	RWMR_RPA_P8_5THREAD,
	RWMR_RPA_P8_6THREAD,
	RWMR_RPA_P8_7THREAD,
	RWMR_RPA_P8_8THREAD,
};

static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* If we're a nested hypervisor, fall back to ordinary IPIs for now */
	if (kvmhv_on_pseries())
		return false;

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct rcuwait *waitp;

	waitp = kvm_arch_vcpu_get_wait(vcpu);
	if (rcuwait_wake_up(waitp))
		++vcpu->stat.generic.halt_wakeup;

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

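/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.
 *
 * Updates to stolen_tb are protected by the vcore->stoltb_lock
 * spinlock; updates to busy_stolen are protected by the vcpu's
 * tbacct_lock.  Stolen time is measured in timebase ticks.
 */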
static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

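	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */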
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

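/* Dummy value used in computing PCR value below */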
#define PCR_ARCH_31	(PCR_ARCH_300 << 1)

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		host_pcr_bit = PCR_ARCH_31;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		case PVR_ARCH_31:
			guest_pcr_bit = PCR_ARCH_31;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/*
	 * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit.
	 * Also set all reserved PCR bits.
	 */
	vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	return kvm_get_vcpu_by_id(kvm, id);
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

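/* Length for a per-processor buffer is passed in at offset 4 in the buffer */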
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		/*
		 * The size of our lppaca is 1kB because of the way we align
		 * it for the guest to avoid crossing a 4kB boundary. We only
		 * use 640 bytes of the structure though, so we should accept
		 * clients that set a size of 640.
		 */
		BUILD_BUG_ON(sizeof(struct lppaca) != 640);
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

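	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */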
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* a new area has been registered in the meantime; unpin and retry */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

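/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */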
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;
	unsigned long flags;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}

/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
	int thr;
	struct kvmppc_vcore *vc;

	if (vcpu->arch.doorbell_request)
		return true;

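	/*
	 * Ensure that the read of vcore->dpdes comes after the read
	 * of vcpu->doorbell_request.  This barrier matches the
	 * smp_wmb() in kvmppc_guest_entry_inject().
	 */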
	smp_rmb();
	vc = vcpu->arch.vcore;
	thr = vcpu->vcpu_id - vc->first_vcpuid;
	return !!(vc->dpdes & (1 << thr));
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR0:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (!ppc_breakpoint_available())
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr0 = value1;
		vcpu->arch.dawrx0 = value2;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR1:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (!ppc_breakpoint_available())
			return H_P2;
		if (!cpu_has_feature(CPU_FTR_DAWR1))
			return H_P2;
		if (!vcpu->kvm->arch.dawr1_enabled)
			return H_FUNCTION;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr1 = value1;
		vcpu->arch.dawrx1 = value2;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
		/*
		 * AIL=3 (mflags == 3) cannot be supported for radix guests
		 * on CPUs affected by the radix prefetch bug; everything
		 * else is deferred via H_TOO_HARD.
		 */
		if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
		    kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
			return H_UNSUPPORTED_FLAG_START;
		return H_TOO_HARD;
	default:
		return H_TOO_HARD;
	}
}

/* Copy guest memory in place - must reside within a single memslot */
static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
			     unsigned long len)
{
	struct kvm_memory_slot *to_memslot = NULL;
	struct kvm_memory_slot *from_memslot = NULL;
	unsigned long to_addr, from_addr;
	int r;

	/* Get HVA for from address */
	from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
	if (!from_memslot)
		return -EFAULT;
	if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
			     << PAGE_SHIFT))
		return -EINVAL;
	from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
	if (kvm_is_error_hva(from_addr))
		return -EFAULT;
	from_addr |= (from & (PAGE_SIZE - 1));

	/* Get HVA for to address */
	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
	if (!to_memslot)
		return -EFAULT;
	if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
			   << PAGE_SHIFT))
		return -EINVAL;
	to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
	if (kvm_is_error_hva(to_addr))
		return -EFAULT;
	to_addr |= (to & (PAGE_SIZE - 1));

	/* Perform copy */
	r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
			     len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, to >> PAGE_SHIFT);
	return 0;
}

static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			       unsigned long dest, unsigned long src)
{
	u64 pg_sz = SZ_4K;		/* 4K page size */
	u64 pg_mask = SZ_4K - 1;
	int ret;

	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
		return H_PARAMETER;

	/* dest (and src if copy_page flag set) must be page aligned */
	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
		return H_PARAMETER;

	/* zero and/or copy the page as determined by the flags */
	if (flags & H_COPY_PAGE) {
		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	} else if (flags & H_ZERO_PAGE) {
		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	}

	/* We can ignore the remaining flags */

	return H_SUCCESS;
}

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

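	/*
	 * We expect to have been called by the real-mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 *
	 * In the case of the P9 single vcpu per vcore case, the real
	 * mode handler is not called but no other threads are in the
	 * source vcore.
	 */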
	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

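/*
 * H_RPT_INVALIDATE hcall handler for nested guests.
 *
 * Handles only nested process-scoped invalidation requests in L0.
 */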
static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
{
	unsigned long type = kvmppc_get_gpr(vcpu, 6);
	unsigned long pid, pg_sizes, start, end;

	/*
	 * The partition-scoped invalidations aren't handled here in L0.
	 */
	if (type & H_RPTI_TYPE_NESTED)
		return RESUME_HOST;

	pid = kvmppc_get_gpr(vcpu, 4);
	pg_sizes = kvmppc_get_gpr(vcpu, 7);
	start = kvmppc_get_gpr(vcpu, 8);
	end = kvmppc_get_gpr(vcpu, 9);

	do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
				type, pg_sizes, start, end);

	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
	return RESUME_GUEST;
}

static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
				    unsigned long id, unsigned long target,
				    unsigned long type, unsigned long pg_sizes,
				    unsigned long start, unsigned long end)
{
	if (!kvm_is_radix(vcpu->kvm))
		return H_UNSUPPORTED;

	if (end < start)
		return H_P5;

	/*
	 * Partition-scoped invalidation for nested guests.
	 */
	if (type & H_RPTI_TYPE_NESTED) {
		if (!nesting_enabled(vcpu->kvm))
			return H_FUNCTION;

		/* Support only cores as target */
		if (target != H_RPTI_TARGET_CMMU)
			return H_P2;

		return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
					       start, end);
	}

	/*
	 * Process-scoped invalidation for L1 guests.
	 */
	do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
				type, pg_sizes, start, end);
	return H_SUCCESS;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_REMOVE:
		ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
				      kvmppc_get_gpr(vcpu, 5),
				      kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_ENTER:
		ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
				     kvmppc_get_gpr(vcpu, 5),
				     kvmppc_get_gpr(vcpu, 6),
				     kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_READ:
		ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
				    kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_CLEAR_MOD:
		ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_CLEAR_REF:
		ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PROTECT:
		ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5),
				       kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_BULK_REMOVE:
		ret = kvmppc_h_bulk_remove(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;

	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded)
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			if (xics_on_xive()) {
				ret = H_NOT_AVAILABLE;
				return RESUME_GUEST;
			}
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_SET_DABR:
		ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_SET_XDABR:
		ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5));
		break;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case H_GET_TCE:
		ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5),
				       kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6),
					 kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
#endif
	case H_RANDOM:
		if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
			ret = H_HARDWARE;
		break;
	case H_RPT_INVALIDATE:
		ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7),
					      kvmppc_get_gpr(vcpu, 8),
					      kvmppc_get_gpr(vcpu, 9));
		break;

	case H_SET_PARTITION_TABLE:
		ret = H_FUNCTION;
		if (nesting_enabled(kvm))
			ret = kvmhv_set_partition_table(vcpu);
		break;
	case H_ENTER_NESTED:
		ret = H_FUNCTION;
		if (!nesting_enabled(kvm))
			break;
		ret = kvmhv_enter_nested_guest(vcpu);
		if (ret == H_INTERRUPT) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return -EINTR;
		} else if (ret == H_TOO_HARD) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return RESUME_HOST;
		}
		break;
	case H_TLB_INVALIDATE:
		ret = H_FUNCTION;
		if (nesting_enabled(kvm))
			ret = kvmhv_do_nested_tlbie(vcpu);
		break;
	case H_COPY_TOFROM_GUEST:
		ret = H_FUNCTION;
		if (nesting_enabled(kvm))
			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
		break;
	case H_PAGE_INIT:
		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_IN:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_in(kvm,
						   kvmppc_get_gpr(vcpu, 4),
						   kvmppc_get_gpr(vcpu, 5),
						   kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_OUT:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_out(kvm,
						    kvmppc_get_gpr(vcpu, 4),
						    kvmppc_get_gpr(vcpu, 5),
						    kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_INIT_START:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_start(kvm);
		break;
	case H_SVM_INIT_DONE:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_done(kvm);
		break;
	case H_SVM_INIT_ABORT:
		/*
		 * Even if that call is made by the Ultravisor, the SSR1 value
		 * is the guest context one, with the secure bit clear as it has
		 * not yet been secured. So we can't check it here.
		 * Instead the kvm->arch.secure_guest flag is checked inside
		 * kvmppc_h_svm_init_abort().
		 */
		ret = kvmppc_h_svm_init_abort(kvm);
		break;

	default:
		return RESUME_HOST;
	}
	WARN_ON_ONCE(ret == H_TOO_HARD);
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

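/*
 * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
 * handlers in book3s_hv_rmhandlers.S.
 *
 * This has to be done early, not in kvmppc_pseries_do_hcall(), so
 * that the cede logic in kvmppc_run_single_vcpu() works properly.
 */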
static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr |= MSR_EE;
	vcpu->arch.ceded = 1;
	smp_mb();
	if (vcpu->arch.prodded) {
		vcpu->arch.prodded = 0;
		smp_mb();
		vcpu->arch.ceded = 0;
	}
}

static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
	case H_PAGE_INIT:
	case H_RPT_INVALIDATE:
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
	    EMULATE_DONE) {
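		/*
		 * Fetch failed when we tried to fetch the instruction below
		 * so return to guest and retry.
		 */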
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}

static void do_nothing(void *x)
{
}

static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
{
	int thr, cpu, pcpu, nthreads;
	struct kvm_vcpu *v;
	unsigned long dpdes;

	nthreads = vcpu->kvm->arch.emul_smt_mode;
	dpdes = 0;
	cpu = vcpu->vcpu_id & ~(nthreads - 1);
	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
		if (!v)
			continue;
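		/*
		 * If the vcpu is currently running on a physical cpu thread,
		 * interrupt it in order to pull it out of the guest briefly,
		 * which will update its vcore->dpdes value.
		 */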
		pcpu = READ_ONCE(v->cpu);
		if (pcpu >= 0)
			smp_call_function_single(pcpu, do_nothing, NULL, 1);
		if (kvmppc_doorbell_pending(v))
			dpdes |= 1 << thr;
	}
	return dpdes;
}

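/*
 * On POWER9, emulate doorbell-related instructions in order to
 * give the guest the illusion of running on a multi-threaded core.
 * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
 * and mfspr DPDES.
 */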
static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
{
	u32 inst, rb, thr;
	unsigned long arg;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tvcpu;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
		return RESUME_GUEST;
	if (get_op(inst) != 31)
		return EMULATE_FAIL;
	rb = get_rb(inst);
	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
	switch (get_xop(inst)) {
	case OP_31_XOP_MSGSNDP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
			break;
		arg &= 0x7f;
		if (arg >= kvm->arch.emul_smt_mode)
			break;
		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
		if (!tvcpu)
			break;
		if (!tvcpu->arch.doorbell_request) {
			tvcpu->arch.doorbell_request = 1;
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		}
		break;
	case OP_31_XOP_MSGCLRP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
			break;
		vcpu->arch.vcore->dpdes = 0;
		vcpu->arch.doorbell_request = 0;
		break;
	case OP_31_XOP_MFSPR:
		switch (get_sprn(inst)) {
		case SPRN_TIR:
			arg = thr;
			break;
		case SPRN_DPDES:
			arg = kvmppc_read_dpdes(vcpu);
			break;
		default:
			return EMULATE_FAIL;
		}
		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
		break;
	default:
		return EMULATE_FAIL;
	}
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
	return RESUME_GUEST;
}

static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	struct kvm_run *run = vcpu->run;
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

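	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */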
	if (vcpu->arch.shregs.msr & MSR_HV) {
		printk(KERN_EMERG "KVM trap in HV mode!\n");
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		return RESUME_HOST;
	}
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK: {
		static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
		/*
		 * Print the MCE event to host console.
		 */
		if (__ratelimit(&rs))
			machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);

		/*
		 * If the guest can do FWNMI, exit to userspace so it can
		 * deliver a FWNMI to the guest.
		 * Otherwise we synthesize a machine check for the guest
		 * so that it knows that the machine check occurred.
		 */
		if (!vcpu->kvm->arch.fwnmi_enabled) {
			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
			kvmppc_core_queue_machine_check(vcpu, flags);
			r = RESUME_GUEST;
			break;
		}

		/* Exit to guest with KVM_EXIT_NMI as exit reason */
		run->exit_reason = KVM_EXIT_NMI;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		/* Clear out the old NMI status from run->flags */
		run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
		/* Now set the NMI status */
		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
			run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
		else
			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;

		r = RESUME_HOST;
		break;
	}
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		int i;

		if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
			/*
			 * Guest userspace executed sc 1. This can only be
			 * reached by the P9 path because the old path
			 * handles this case in realmode hcall handlers.
			 */
			if (!kvmhv_vcpu_is_radix(vcpu)) {
				/*
				 * A guest could be running PR KVM, so this
				 * may be a PR KVM hcall. It must be reflected
				 * to the guest kernel as a sc interrupt.
				 */
				kvmppc_core_queue_syscall(vcpu);
			} else {
				/*
				 * Radix guests can not run PR KVM or nested HV
				 * hash guests which might run PR KVM, so this
				 * is always a privilege fault. Send a program
				 * check to guest kernel.
				 */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
			}
			r = RESUME_GUEST;
			break;
		}

		/*
		 * hcall - gather args and set exit_reason. This will next be
		 * handled by kvmppc_pseries_do_hcall which may be able to deal
		 * with it and resume guest, or may punt to userspace.
		 */
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.
	 *
	 * Any other HDSI/HISI interrupts have been handled already for P7/8
	 * guests. For POWER9 hash guests not using rmhandlers, basic hash
	 * fault handling is done here.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
		unsigned long vsid;
		long err;

		if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
			r = RESUME_GUEST; /* Just retry if it's the canary */
			break;
		}

		if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
			/*
			 * Radix doesn't require anything, and pre-ISAv3.0 hash
			 * already attempted to handle this in rmhandlers. The
			 * hash fault handling below is v3 only (it uses ASDR
			 * via fault_gpa).
			 */
			r = RESUME_PAGE_FAULT;
			break;
		}

		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
			kvmppc_core_queue_data_storage(vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			r = RESUME_GUEST;
			break;
		}

		if (!(vcpu->arch.shregs.msr & MSR_DR))
			vsid = vcpu->kvm->arch.vrma_slb_v;
		else
			vsid = vcpu->arch.fault_gpa;

		err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
				vsid, vcpu->arch.fault_dsisr, true);
		if (err == 0) {
			r = RESUME_GUEST;
		} else if (err == -1 || err == -2) {
			r = RESUME_PAGE_FAULT;
		} else {
			kvmppc_core_queue_data_storage(vcpu,
				vcpu->arch.fault_dar, err);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_H_INST_STORAGE: {
		unsigned long vsid;
		long err;

		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
			DSISR_SRR1_MATCH_64S;
		if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
			/*
			 * Radix doesn't require anything, and pre-ISAv3.0 hash
			 * already attempted to handle this in rmhandlers. The
			 * hash fault handling below is v3 only (it uses ASDR
			 * via fault_gpa).
			 */
			if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
				vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
			r = RESUME_PAGE_FAULT;
			break;
		}

		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
			kvmppc_core_queue_inst_storage(vcpu,
				vcpu->arch.fault_dsisr);
			r = RESUME_GUEST;
			break;
		}

		if (!(vcpu->arch.shregs.msr & MSR_IR))
			vsid = vcpu->kvm->arch.vrma_slb_v;
		else
			vsid = vcpu->arch.fault_gpa;

		err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
				vsid, vcpu->arch.fault_dsisr, false);
		if (err == 0) {
			r = RESUME_GUEST;
		} else if (err == -1) {
			r = RESUME_PAGE_FAULT;
		} else {
			kvmppc_core_queue_inst_storage(vcpu, err);
			r = RESUME_GUEST;
		}
		break;
	}

	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If the guest debug is disabled, generate a program interrupt
	 * to the guest. If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
	 * Accordingly return to Guest or Host.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.emul_inst) :
				vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
			r = kvmppc_emulate_debug_inst(vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;

	/*
	 * This occurs if the guest (kernel or userspace), does something that
	 * is prohibited by HFSCR.
	 * On POWER9, this could be a doorbell instruction that we need
	 * to emulate.
	 * Otherwise, we just generate a program interrupt to the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		r = EMULATE_FAIL;
		if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
		    cpu_has_feature(CPU_FTR_ARCH_300))
			r = kvmppc_emulate_doorbell_instr(vcpu);
		if (r == EMULATE_FAIL) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2.  We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
#endif

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	vcpu->stat.sum_exits++;

	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		pr_emerg("KVM trap in HV mode while nested!\n");
		pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			 vcpu->arch.trap, kvmppc_get_pc(vcpu),
			 vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		return RESUME_HOST;
	}
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_HOST;
		break;
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_SYSTEM_RESET:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	{
		static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
		/* Pass the machine check to the L1 guest */
		r = RESUME_HOST;
		/* Print the MCE event to host console. */
		if (__ratelimit(&rs))
			machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvmhv_nested_page_fault(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
					 DSISR_SRR1_MATCH_64S;
		if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
			vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvmhv_nested_page_fault(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		break;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case BOOK3S_INTERRUPT_HV_SOFTPATCH:
		/*
		 * This occurs for various TM-related instructions that
		 * we need to emulate on POWER9 DD2.2.  We have already
		 * handled the cases where the guest was in real-suspend
		 * mode and was transitioning to transactional state.
		 */
		r = kvmhv_p9_tm_emulation(vcpu);
		break;
#endif

	case BOOK3S_INTERRUPT_HV_RM_HARD:
		vcpu->arch.trap = 0;
		r = RESUME_GUEST;
		if (!xics_on_xive())
			kvmppc_xics_rm_complete(vcpu, 0);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		unsigned long req = kvmppc_get_gpr(vcpu, 3);

		/*
		 * The H_RPT_INVALIDATE hcalls issued by nested
		 * guests for process-scoped invalidations when
		 * GTSE=0 are handled here in L0.
		 */
		if (req == H_RPT_INVALIDATE) {
			r = kvmppc_nested_h_rpt_invalidate(vcpu);
			break;
		}

		r = RESUME_HOST;
		break;
	}
	default:
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

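/*
 * Enforce limits on guest LPCR values based on hardware availability
 * and guest configuration.
 */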
unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
{
	/* LPCR_TC only applies to HPT guests */
	if (kvm_is_radix(kvm))
		lpcr &= ~LPCR_TC;

	/* On POWER8 and above, userspace can modify AIL */
	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		lpcr &= ~LPCR_AIL;
	if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
		lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
	/*
	 * On some POWER9s we force AIL off for radix guests to prevent
	 * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to
	 * guest, which can result in Q0 translations with LPID=0 PID=PIDR to
	 * be cached, which the host TLB management does not expect.
	 */
	if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
		lpcr &= ~LPCR_AIL;

	/*
	 * On POWER9, allow userspace to enable large decrementer for the
	 * guest, whether or not the host has it enabled.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		lpcr &= ~LPCR_LD;

	return lpcr;
}

static void verify_lpcr(struct kvm *kvm, unsigned long lpcr)
{
	if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) {
		WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n",
			  lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr));
	}
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		bool preserve_top32)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	spin_lock(&vc->lock);

	/*
	 * Userspace can only modify
	 * DPFD (default prefetch depth), ILE (interrupt little-endian),
	 * TC (translation control), AIL (alternate interrupt location),
	 * LD (large decrementer).
	 * These are subject to restrictions from kvmppc_filter_lpcr_hv().
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;

	new_lpcr = kvmppc_filter_lpcr_hv(kvm,
			(vc->lpcr & ~mask) | (new_lpcr & mask));

	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}

	vc->lpcr = new_lpcr;

	spin_unlock(&vc->lock);
}

static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DABRX:
		*val = get_reg_val(id, vcpu->arch.dabrx);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_MMCR2:
		*val = get_reg_val(id, vcpu->arch.mmcr[2]);
		break;
	case KVM_REG_PPC_MMCRA:
		*val = get_reg_val(id, vcpu->arch.mmcra);
		break;
	case KVM_REG_PPC_MMCRS:
		*val = get_reg_val(id, vcpu->arch.mmcrs);
		break;
	case KVM_REG_PPC_MMCR3:
		*val = get_reg_val(id, vcpu->arch.mmcr[3]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		*val = get_reg_val(id, vcpu->arch.spmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
	case KVM_REG_PPC_SIER:
		*val = get_reg_val(id, vcpu->arch.sier[0]);
		break;
	case KVM_REG_PPC_SIER2:
		*val = get_reg_val(id, vcpu->arch.sier[1]);
		break;
	case KVM_REG_PPC_SIER3:
		*val = get_reg_val(id, vcpu->arch.sier[2]);
		break;
	case KVM_REG_PPC_IAMR:
		*val = get_reg_val(id, vcpu->arch.iamr);
		break;
	case KVM_REG_PPC_PSPB:
		*val = get_reg_val(id, vcpu->arch.pspb);
		break;
	case KVM_REG_PPC_DPDES:
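		/*
		 * On POWER9, where we are emulating msgsndp etc.,
		 * we return 1 bit for each vcpu, which can come from
		 * either vcore->dpdes or doorbell_request.
		 * On POWER8, doorbell_request is 0.
		 */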
		*val = get_reg_val(id, vcpu->arch.vcore->dpdes |
				   vcpu->arch.doorbell_request);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
		break;
	case KVM_REG_PPC_DAWR:
		*val = get_reg_val(id, vcpu->arch.dawr0);
		break;
	case KVM_REG_PPC_DAWRX:
		*val = get_reg_val(id, vcpu->arch.dawrx0);
		break;
	case KVM_REG_PPC_DAWR1:
		*val = get_reg_val(id, vcpu->arch.dawr1);
		break;
	case KVM_REG_PPC_DAWRX1:
		*val = get_reg_val(id, vcpu->arch.dawrx1);
		break;
	case KVM_REG_PPC_CIABR:
		*val = get_reg_val(id, vcpu->arch.ciabr);
		break;
	case KVM_REG_PPC_CSIGR:
		*val = get_reg_val(id, vcpu->arch.csigr);
		break;
	case KVM_REG_PPC_TACR:
		*val = get_reg_val(id, vcpu->arch.tacr);
		break;
	case KVM_REG_PPC_TCSCR:
		*val = get_reg_val(id, vcpu->arch.tcscr);
		break;
	case KVM_REG_PPC_PID:
		*val = get_reg_val(id, vcpu->arch.pid);
		break;
	case KVM_REG_PPC_ACOP:
		*val = get_reg_val(id, vcpu->arch.acop);
		break;
	case KVM_REG_PPC_WORT:
		*val = get_reg_val(id, vcpu->arch.wort);
		break;
	case KVM_REG_PPC_TIDR:
		*val = get_reg_val(id, vcpu->arch.tid);
		break;
	case KVM_REG_PPC_PSSCR:
		*val = get_reg_val(id, vcpu->arch.psscr);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	case KVM_REG_PPC_DEC_EXPIRY:
		*val = get_reg_val(id, vcpu->arch.dec_expires +
				   vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_ONLINE:
		*val = get_reg_val(id, vcpu->arch.online);
		break;
	case KVM_REG_PPC_PTCR:
		*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DABRX:
		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR2:
		vcpu->arch.mmcr[2] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCRA:
		vcpu->arch.mmcra = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCRS:
		vcpu->arch.mmcrs = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR3:
		/* This is a set operation; the original read back into *val was a bug */
		vcpu->arch.mmcr[3] = set_reg_val(id, *val);
2270 break;
2271 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
2272 i = id - KVM_REG_PPC_PMC1;
2273 vcpu->arch.pmc[i] = set_reg_val(id, *val);
2274 break;
2275 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
2276 i = id - KVM_REG_PPC_SPMC1;
2277 vcpu->arch.spmc[i] = set_reg_val(id, *val);
2278 break;
2279 case KVM_REG_PPC_SIAR:
2280 vcpu->arch.siar = set_reg_val(id, *val);
2281 break;
2282 case KVM_REG_PPC_SDAR:
2283 vcpu->arch.sdar = set_reg_val(id, *val);
2284 break;
2285 case KVM_REG_PPC_SIER:
2286 vcpu->arch.sier[0] = set_reg_val(id, *val);
2287 break;
2288 case KVM_REG_PPC_SIER2:
2289 vcpu->arch.sier[1] = set_reg_val(id, *val);
2290 break;
2291 case KVM_REG_PPC_SIER3:
2292 vcpu->arch.sier[2] = set_reg_val(id, *val);
2293 break;
2294 case KVM_REG_PPC_IAMR:
2295 vcpu->arch.iamr = set_reg_val(id, *val);
2296 break;
2297 case KVM_REG_PPC_PSPB:
2298 vcpu->arch.pspb = set_reg_val(id, *val);
2299 break;
2300 case KVM_REG_PPC_DPDES:
2301 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
2302 break;
2303 case KVM_REG_PPC_VTB:
2304 vcpu->arch.vcore->vtb = set_reg_val(id, *val);
2305 break;
2306 case KVM_REG_PPC_DAWR:
2307 vcpu->arch.dawr0 = set_reg_val(id, *val);
2308 break;
2309 case KVM_REG_PPC_DAWRX:
2310 vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
2311 break;
2312 case KVM_REG_PPC_DAWR1:
2313 vcpu->arch.dawr1 = set_reg_val(id, *val);
2314 break;
2315 case KVM_REG_PPC_DAWRX1:
2316 vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
2317 break;
2318 case KVM_REG_PPC_CIABR:
2319 vcpu->arch.ciabr = set_reg_val(id, *val);
2320
2321 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
2322 vcpu->arch.ciabr &= ~CIABR_PRIV;
2323 break;
2324 case KVM_REG_PPC_CSIGR:
2325 vcpu->arch.csigr = set_reg_val(id, *val);
2326 break;
2327 case KVM_REG_PPC_TACR:
2328 vcpu->arch.tacr = set_reg_val(id, *val);
2329 break;
2330 case KVM_REG_PPC_TCSCR:
2331 vcpu->arch.tcscr = set_reg_val(id, *val);
2332 break;
2333 case KVM_REG_PPC_PID:
2334 vcpu->arch.pid = set_reg_val(id, *val);
2335 break;
2336 case KVM_REG_PPC_ACOP:
2337 vcpu->arch.acop = set_reg_val(id, *val);
2338 break;
2339 case KVM_REG_PPC_WORT:
2340 vcpu->arch.wort = set_reg_val(id, *val);
2341 break;
2342 case KVM_REG_PPC_TIDR:
2343 vcpu->arch.tid = set_reg_val(id, *val);
2344 break;
2345 case KVM_REG_PPC_PSSCR:
2346 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2347 break;
2348 case KVM_REG_PPC_VPA_ADDR:
2349 addr = set_reg_val(id, *val);
2350 r = -EINVAL;
2351 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2352 vcpu->arch.dtl.next_gpa))
2353 break;
2354 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2355 break;
2356 case KVM_REG_PPC_VPA_SLB:
2357 addr = val->vpaval.addr;
2358 len = val->vpaval.length;
2359 r = -EINVAL;
2360 if (addr && !vcpu->arch.vpa.next_gpa)
2361 break;
2362 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2363 break;
2364 case KVM_REG_PPC_VPA_DTL:
2365 addr = val->vpaval.addr;
2366 len = val->vpaval.length;
2367 r = -EINVAL;
2368 if (addr && (len < sizeof(struct dtl_entry) ||
2369 !vcpu->arch.vpa.next_gpa))
2370 break;
2371 len -= len % sizeof(struct dtl_entry);
2372 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2373 break;
2374 case KVM_REG_PPC_TB_OFFSET:
2375		/* round up to multiple of 2^24 */
2376 vcpu->arch.vcore->tb_offset =
2377 ALIGN(set_reg_val(id, *val), 1UL << 24);
2378 break;
2379 case KVM_REG_PPC_LPCR:
2380 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
2381 break;
2382 case KVM_REG_PPC_LPCR_64:
2383 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
2384 break;
2385 case KVM_REG_PPC_PPR:
2386 vcpu->arch.ppr = set_reg_val(id, *val);
2387 break;
2388#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2389 case KVM_REG_PPC_TFHAR:
2390 vcpu->arch.tfhar = set_reg_val(id, *val);
2391 break;
2392 case KVM_REG_PPC_TFIAR:
2393 vcpu->arch.tfiar = set_reg_val(id, *val);
2394 break;
2395 case KVM_REG_PPC_TEXASR:
2396 vcpu->arch.texasr = set_reg_val(id, *val);
2397 break;
2398 case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
2399 i = id - KVM_REG_PPC_TM_GPR0;
2400 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2401 break;
2402 case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
2403 {
2404 int j;
2405 i = id - KVM_REG_PPC_TM_VSR0;
2406 if (i < 32)
2407 for (j = 0; j < TS_FPRWIDTH; j++)
2408 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2409 else
2410 if (cpu_has_feature(CPU_FTR_ALTIVEC))
2411 vcpu->arch.vr_tm.vr[i-32] = val->vval;
2412 else
2413 r = -ENXIO;
2414 break;
2415 }
2416 case KVM_REG_PPC_TM_CR:
2417 vcpu->arch.cr_tm = set_reg_val(id, *val);
2418 break;
2419 case KVM_REG_PPC_TM_XER:
2420 vcpu->arch.xer_tm = set_reg_val(id, *val);
2421 break;
2422 case KVM_REG_PPC_TM_LR:
2423 vcpu->arch.lr_tm = set_reg_val(id, *val);
2424 break;
2425 case KVM_REG_PPC_TM_CTR:
2426 vcpu->arch.ctr_tm = set_reg_val(id, *val);
2427 break;
2428 case KVM_REG_PPC_TM_FPSCR:
2429 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2430 break;
2431 case KVM_REG_PPC_TM_AMR:
2432 vcpu->arch.amr_tm = set_reg_val(id, *val);
2433 break;
2434 case KVM_REG_PPC_TM_PPR:
2435 vcpu->arch.ppr_tm = set_reg_val(id, *val);
2436 break;
2437 case KVM_REG_PPC_TM_VRSAVE:
2438 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2439 break;
2440 case KVM_REG_PPC_TM_VSCR:
2441 if (cpu_has_feature(CPU_FTR_ALTIVEC))
2442 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
2443 else
2444			r = -ENXIO;
2445 break;
2446 case KVM_REG_PPC_TM_DSCR:
2447 vcpu->arch.dscr_tm = set_reg_val(id, *val);
2448 break;
2449 case KVM_REG_PPC_TM_TAR:
2450 vcpu->arch.tar_tm = set_reg_val(id, *val);
2451 break;
2452#endif
2453 case KVM_REG_PPC_ARCH_COMPAT:
2454 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
2455 break;
2456 case KVM_REG_PPC_DEC_EXPIRY:
2457 vcpu->arch.dec_expires = set_reg_val(id, *val) -
2458 vcpu->arch.vcore->tb_offset;
2459 break;
2460 case KVM_REG_PPC_ONLINE:
2461 i = set_reg_val(id, *val);
2462 if (i && !vcpu->arch.online)
2463 atomic_inc(&vcpu->arch.vcore->online_count);
2464 else if (!i && vcpu->arch.online)
2465 atomic_dec(&vcpu->arch.vcore->online_count);
2466 vcpu->arch.online = i;
2467 break;
2468 case KVM_REG_PPC_PTCR:
2469 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2470 break;
2471 default:
2472 r = -EINVAL;
2473 break;
2474 }
2475
2476 return r;
2477}
2478
2479/*
2480 * On POWER9, threads are independent and can be in different
2481 * partitions, so we treat each thread as a subcore.
2482 * There is a restriction that all threads have to be in the same
2483 * MMU mode (radix or HPT), but since we only support HPT guests
2484 * on an HPT host so far, that isn't an impediment.
2485 */
2486static int threads_per_vcore(struct kvm *kvm)
2487{
2488 if (cpu_has_feature(CPU_FTR_ARCH_300))
2489 return 1;
2490 return threads_per_subcore;
2491}
2492
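/*
 * Allocate and initialize the struct kvmppc_vcore for the virtual core
 * whose first vcpu has ID 'id'.  Returns NULL on allocation failure.
 */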
2493static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
2494{
2495 struct kvmppc_vcore *vcore;
2496
2497 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
2498
2499 if (vcore == NULL)
2500 return NULL;
2501
2502 spin_lock_init(&vcore->lock);
2503 spin_lock_init(&vcore->stoltb_lock);
2504 rcuwait_init(&vcore->wait);
2505 vcore->preempt_tb = TB_NIL;
2506 vcore->lpcr = kvm->arch.lpcr;
2507 vcore->first_vcpuid = id;
2508 vcore->kvm = kvm;
2509 INIT_LIST_HEAD(&vcore->preempt_list);
2510
2511 return vcore;
2512}
2513
2514#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2515static struct debugfs_timings_element {
2516 const char *name;
2517 size_t offset;
2518} timings[] = {
2519 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2520 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2521 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2522 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2523 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2524};
2525
2526#define N_TIMINGS (ARRAY_SIZE(timings))
2527
2528struct debugfs_timings_state {
2529 struct kvm_vcpu *vcpu;
2530 unsigned int buflen;
2531 char buf[N_TIMINGS * 100];
2532};
2533
2534static int debugfs_timings_open(struct inode *inode, struct file *file)
2535{
2536 struct kvm_vcpu *vcpu = inode->i_private;
2537 struct debugfs_timings_state *p;
2538
2539 p = kzalloc(sizeof(*p), GFP_KERNEL);
2540 if (!p)
2541 return -ENOMEM;
2542
2543 kvm_get_kvm(vcpu->kvm);
2544 p->vcpu = vcpu;
2545 file->private_data = p;
2546
2547 return nonseekable_open(inode, file);
2548}
2549
2550static int debugfs_timings_release(struct inode *inode, struct file *file)
2551{
2552 struct debugfs_timings_state *p = file->private_data;
2553
2554 kvm_put_kvm(p->vcpu->kvm);
2555 kfree(p);
2556 return 0;
2557}
2558
2559static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
2560 size_t len, loff_t *ppos)
2561{
2562 struct debugfs_timings_state *p = file->private_data;
2563 struct kvm_vcpu *vcpu = p->vcpu;
2564 char *s, *buf_end;
2565 struct kvmhv_tb_accumulator tb;
2566 u64 count;
2567 loff_t pos;
2568 ssize_t n;
2569 int i, loops;
2570 bool ok;
2571
2572 if (!p->buflen) {
2573 s = p->buf;
2574 buf_end = s + sizeof(p->buf);
2575 for (i = 0; i < N_TIMINGS; ++i) {
2576 struct kvmhv_tb_accumulator *acc;
2577
2578 acc = (struct kvmhv_tb_accumulator *)
2579 ((unsigned long)vcpu + timings[i].offset);
2580 ok = false;
2581 for (loops = 0; loops < 1000; ++loops) {
2582 count = acc->seqcount;
2583 if (!(count & 1)) {
2584 smp_rmb();
2585 tb = *acc;
2586 smp_rmb();
2587 if (count == acc->seqcount) {
2588 ok = true;
2589 break;
2590 }
2591 }
2592 udelay(1);
2593 }
2594 if (!ok)
2595 snprintf(s, buf_end - s, "%s: stuck\n",
2596 timings[i].name);
2597 else
2598 snprintf(s, buf_end - s,
2599 "%s: %llu %llu %llu %llu\n",
2600 timings[i].name, count / 2,
2601 tb_to_ns(tb.tb_total),
2602 tb_to_ns(tb.tb_min),
2603 tb_to_ns(tb.tb_max));
2604 s += strlen(s);
2605 }
2606 p->buflen = s - p->buf;
2607 }
2608
2609 pos = *ppos;
2610 if (pos >= p->buflen)
2611 return 0;
2612 if (len > p->buflen - pos)
2613 len = p->buflen - pos;
2614 n = copy_to_user(buf, p->buf + pos, len);
2615 if (n) {
2616 if (n == len)
2617 return -EFAULT;
2618 len -= n;
2619 }
2620 *ppos = pos + len;
2621 return len;
2622}
2623
2624static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
2625 size_t len, loff_t *ppos)
2626{
2627 return -EACCES;
2628}
2629
2630static const struct file_operations debugfs_timings_ops = {
2631 .owner = THIS_MODULE,
2632 .open = debugfs_timings_open,
2633 .release = debugfs_timings_release,
2634 .read = debugfs_timings_read,
2635 .write = debugfs_timings_write,
2636 .llseek = generic_file_llseek,
2637};
2638
2639/* Create a debugfs directory for the vcpu */
2640static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2641{
2642 char buf[16];
2643 struct kvm *kvm = vcpu->kvm;
2644
2645 snprintf(buf, sizeof(buf), "vcpu%u", id);
2646 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
2647 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
2648 &debugfs_timings_ops);
2649}
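
/*
 * Each line of the timings file reads "<name>: <count> <total> <min> <max>",
 * where count is the number of intervals sampled and the times are in
 * nanoseconds.  Illustrative path only (the kvm debugfs layout is assumed,
 * not defined here):
 *
 *	# cat /sys/kernel/debug/kvm/<pid>-<vm_fd>/vcpu0/timings
 */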
2650
2651#else
2652static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
2653{
2654}
2655#endif
2656
2657static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
2658{
2659 int err;
2660 int core;
2661 struct kvmppc_vcore *vcore;
2662 struct kvm *kvm;
2663 unsigned int id;
2664
2665 kvm = vcpu->kvm;
2666 id = vcpu->vcpu_id;
2667
2668 vcpu->arch.shared = &vcpu->arch.shregs;
2669#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2670	/*
2671	 * The shared struct is never shared on HV,
2672	 * so we can always use host endianness.
2673	 */
2674#ifdef __BIG_ENDIAN__
2675 vcpu->arch.shared_big_endian = true;
2676#else
2677 vcpu->arch.shared_big_endian = false;
2678#endif
2679#endif
2680 vcpu->arch.mmcr[0] = MMCR0_FC;
2681 vcpu->arch.ctrl = CTRL_RUNLATCH;
2682
2683 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2684 spin_lock_init(&vcpu->arch.vpa_update_lock);
2685 spin_lock_init(&vcpu->arch.tbacct_lock);
2686 vcpu->arch.busy_preempt = TB_NIL;
2687 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2688
2689	/*
2690	 * Set the default HFSCR for the guest from the host value.
2691	 * This value is only used on POWER9.
2692	 * On POWER9, we want to virtualize the doorbell facility, so we
2693	 * don't set the HFSCR_MSGP bit, and that causes those instructions
2694	 * to trap and then we emulate them.
2695	 */
2696 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
2697 HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX;
2698 if (cpu_has_feature(CPU_FTR_HVMODE)) {
2699 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
2700#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2701 if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
2702 vcpu->arch.hfscr |= HFSCR_TM;
2703#endif
2704 }
2705 if (cpu_has_feature(CPU_FTR_TM_COMP))
2706 vcpu->arch.hfscr |= HFSCR_TM;
2707
2708 kvmppc_mmu_book3s_hv_init(vcpu);
2709
2710 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2711
2712 init_waitqueue_head(&vcpu->arch.cpu_run);
2713
2714 mutex_lock(&kvm->lock);
2715 vcore = NULL;
2716 err = -EINVAL;
2717 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
2718 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2719 pr_devel("KVM: VCPU ID too high\n");
2720 core = KVM_MAX_VCORES;
2721 } else {
2722 BUG_ON(kvm->arch.smt_mode != 1);
2723 core = kvmppc_pack_vcpu_id(kvm, id);
2724 }
2725 } else {
2726 core = id / kvm->arch.smt_mode;
2727 }
2728 if (core < KVM_MAX_VCORES) {
2729 vcore = kvm->arch.vcores[core];
2730 if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
2731			pr_devel("KVM: collision on id %u\n", id);
2732 vcore = NULL;
2733 } else if (!vcore) {
2734			/*
2735			 * Take mmu_setup_lock for mutual exclusion
2736			 * with kvmppc_update_lpcr().
2737			 */
2738 err = -ENOMEM;
2739 vcore = kvmppc_vcore_create(kvm,
2740 id & ~(kvm->arch.smt_mode - 1));
2741 mutex_lock(&kvm->arch.mmu_setup_lock);
2742 kvm->arch.vcores[core] = vcore;
2743 kvm->arch.online_vcores++;
2744 mutex_unlock(&kvm->arch.mmu_setup_lock);
2745 }
2746 }
2747 mutex_unlock(&kvm->lock);
2748
2749 if (!vcore)
2750 return err;
2751
2752 spin_lock(&vcore->lock);
2753 ++vcore->num_threads;
2754 spin_unlock(&vcore->lock);
2755 vcpu->arch.vcore = vcore;
2756 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
2757 vcpu->arch.thread_cpu = -1;
2758 vcpu->arch.prev_cpu = -1;
2759
2760 vcpu->arch.cpu_type = KVM_CPU_3S_64;
2761 kvmppc_sanity_check(vcpu);
2762
2763 debugfs_vcpu_init(vcpu, id);
2764
2765 return 0;
2766}
2767
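/*
 * Set the guest SMT mode, reached via KVM_ENABLE_CAP(KVM_CAP_PPC_SMT)
 * on the VM fd while no virtual cores exist yet (-EBUSY otherwise).
 * An illustrative userspace sketch (fd name hypothetical); args[0] is
 * the SMT mode and args[1] the flags, which must be zero here:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_PPC_SMT,
 *		.args = { 4, 0 },
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */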
2768static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2769 unsigned long flags)
2770{
2771 int err;
2772 int esmt = 0;
2773
2774 if (flags)
2775 return -EINVAL;
2776 if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2777 return -EINVAL;
2778 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2779		/*
2780		 * On POWER8 (or POWER7), the threading mode is "strict",
2781		 * so we pack smt_mode vcpus per vcore.
2782		 */
2783 if (smt_mode > threads_per_subcore)
2784 return -EINVAL;
2785 } else {
2786		/*
2787		 * On POWER9, the threading mode is "loose",
2788		 * so each vcpu gets its own vcore.
2789		 */
2790 esmt = smt_mode;
2791 smt_mode = 1;
2792 }
2793 mutex_lock(&kvm->lock);
2794 err = -EBUSY;
2795 if (!kvm->arch.online_vcores) {
2796 kvm->arch.smt_mode = smt_mode;
2797 kvm->arch.emul_smt_mode = esmt;
2798 err = 0;
2799 }
2800 mutex_unlock(&kvm->lock);
2801
2802 return err;
2803}
2804
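/* Unpin the guest page (if any) backing this VPA, dirtying it if written. */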
2805static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2806{
2807 if (vpa->pinned_addr)
2808 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2809 vpa->dirty);
2810}
2811
2812static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
2813{
2814 spin_lock(&vcpu->arch.vpa_update_lock);
2815 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2816 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2817 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2818 spin_unlock(&vcpu->arch.vpa_update_lock);
2819}
2820
2821static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2822{
2823	/* Indicate we want to get back into the guest */
2824 return 1;
2825}
2826
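/*
 * Arm the vcpu's decrementer hrtimer, or queue a decrementer exception
 * right away if the decrementer has already expired.
 */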
2827static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
2828{
2829 unsigned long dec_nsec, now;
2830
2831 now = get_tb();
2832 if (now > vcpu->arch.dec_expires) {
2833		/* decrementer has already gone negative */
2834 kvmppc_core_queue_dec(vcpu);
2835 kvmppc_core_prepare_to_enter(vcpu);
2836 return;
2837 }
2838 dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
2839 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2840 vcpu->arch.timer_running = 1;
2841}
2842
2843extern int __kvmppc_vcore_entry(void);
2844
2845static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2846 struct kvm_vcpu *vcpu)
2847{
2848 u64 now;
2849
2850 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2851 return;
2852 spin_lock_irq(&vcpu->arch.tbacct_lock);
2853 now = mftb();
2854 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2855 vcpu->arch.stolen_logged;
2856 vcpu->arch.busy_preempt = now;
2857 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2858 spin_unlock_irq(&vcpu->arch.tbacct_lock);
2859 --vc->n_runnable;
2860 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2861}
2862
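/*
 * Claim an offline secondary hardware thread of this core from the
 * host so it can run guest code, waiting for it to reach nap mode.
 * Returns -EBUSY if the thread cannot be claimed in time.
 */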
2863static int kvmppc_grab_hwthread(int cpu)
2864{
2865 struct paca_struct *tpaca;
2866 long timeout = 10000;
2867
2868 tpaca = paca_ptrs[cpu];
2869
2870	/* Ensure the thread won't go into the kernel if it wakes */
2871 tpaca->kvm_hstate.kvm_vcpu = NULL;
2872 tpaca->kvm_hstate.kvm_vcore = NULL;
2873 tpaca->kvm_hstate.napping = 0;
2874 smp_wmb();
2875 tpaca->kvm_hstate.hwthread_req = 1;
2876
2877	/*
2878	 * If the thread is already executing in the kernel (e.g. handling
2879	 * a stray interrupt), wait for it to get back to nap mode.
2880	 * The smp_mb() is to ensure that our setting of hwthread_req
2881	 * is visible before we look at hwthread_state, so if this
2882	 * races with the code at system_reset_pSeries and the thread
2883	 * misses our setting of hwthread_req, we are sure to see its
2884	 * setting of hwthread_state, and vice versa.
2885	 */
2886 smp_mb();
2887 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2888 if (--timeout <= 0) {
2889 pr_err("KVM: couldn't grab cpu %d\n", cpu);
2890 return -EBUSY;
2891 }
2892 udelay(1);
2893 }
2894 return 0;
2895}
2896
2897static void kvmppc_release_hwthread(int cpu)
2898{
2899 struct paca_struct *tpaca;
2900
2901 tpaca = paca_ptrs[cpu];
2902 tpaca->kvm_hstate.hwthread_req = 0;
2903 tpaca->kvm_hstate.kvm_vcpu = NULL;
2904 tpaca->kvm_hstate.kvm_vcore = NULL;
2905 tpaca->kvm_hstate.kvm_split_mode = NULL;
2906}
2907
2908static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2909{
2910 struct kvm_nested_guest *nested = vcpu->arch.nested;
2911 cpumask_t *cpu_in_guest;
2912 int i;
2913
2914 cpu = cpu_first_tlb_thread_sibling(cpu);
2915 if (nested) {
2916 cpumask_set_cpu(cpu, &nested->need_tlb_flush);
2917 cpu_in_guest = &nested->cpu_in_guest;
2918 } else {
2919 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2920 cpu_in_guest = &kvm->arch.cpu_in_guest;
2921 }
2922	/*
2923	 * Make sure setting of bit in need_tlb_flush precedes
2924	 * testing of cpu_in_guest bits.  The matching barrier on
2925	 * the other side is the first smp_mb() in kvmppc_run_core().
2926	 */
2927 smp_mb();
2928 for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
2929 i += cpu_tlb_thread_sibling_step())
2930 if (cpumask_test_cpu(i, cpu_in_guest))
2931 smp_call_function_single(i, do_nothing, NULL, 1);
2932}
2933
2934static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2935{
2936 struct kvm_nested_guest *nested = vcpu->arch.nested;
2937 struct kvm *kvm = vcpu->kvm;
2938 int prev_cpu;
2939
2940 if (!cpu_has_feature(CPU_FTR_HVMODE))
2941 return;
2942
2943 if (nested)
2944 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
2945 else
2946 prev_cpu = vcpu->arch.prev_cpu;
2947
2948	/*
2949	 * With radix, the guest can do TLB invalidations itself,
2950	 * and it could choose to use the local form (tlbiel) if
2951	 * it is invalidating a translation that has only ever been
2952	 * used on one vcpu.  However, that doesn't mean it has
2953	 * only ever been used on one physical cpu, since vcpus
2954	 * can move around between pcpus.  To cope with this, when
2955	 * a vcpu moves from one pcpu to another, we need to tell
2956	 * any vcpus running on the same core as this vcpu previously
2957	 * ran to flush the TLB.  The TLB flush will be done by
2958	 * radix_flush_cpu() below.
2959	 */
2960 if (prev_cpu != pcpu) {
2961 if (prev_cpu >= 0 &&
2962 cpu_first_tlb_thread_sibling(prev_cpu) !=
2963 cpu_first_tlb_thread_sibling(pcpu))
2964 radix_flush_cpu(kvm, prev_cpu, vcpu);
2965 if (nested)
2966 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2967 else
2968 vcpu->arch.prev_cpu = pcpu;
2969 }
2970}
2971
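/*
 * Point the target hardware thread's PACA at this vcpu and vcore (or
 * at no vcpu, for an unused thread of the subcore), then kick the
 * thread with an IPI unless it is the calling thread.
 */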
2972static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
2973{
2974 int cpu;
2975 struct paca_struct *tpaca;
2976 struct kvm *kvm = vc->kvm;
2977
2978 cpu = vc->pcpu;
2979 if (vcpu) {
2980 if (vcpu->arch.timer_running) {
2981 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2982 vcpu->arch.timer_running = 0;
2983 }
2984 cpu += vcpu->arch.ptid;
2985 vcpu->cpu = vc->pcpu;
2986 vcpu->arch.thread_cpu = cpu;
2987 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2988 }
2989 tpaca = paca_ptrs[cpu];
2990 tpaca->kvm_hstate.kvm_vcpu = vcpu;
2991 tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
2992 tpaca->kvm_hstate.fake_suspend = 0;
2993	/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
2994 smp_wmb();
2995 tpaca->kvm_hstate.kvm_vcore = vc;
2996 if (cpu != smp_processor_id())
2997 kvmppc_ipi_thread(cpu);
2998}
2999
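/*
 * Spin until all secondary threads on this core have finished running
 * the guest and cleared their kvm_vcore pointers, i.e. have gone back
 * to nap.
 */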
3000static void kvmppc_wait_for_nap(int n_threads)
3001{
3002 int cpu = smp_processor_id();
3003 int i, loops;
3004
3005 if (n_threads <= 1)
3006 return;
3007 for (loops = 0; loops < 1000000; ++loops) {
3008		/*
3009		 * Check if all threads are finished.
3010		 * We set the vcore pointer when starting a thread
3011		 * and the thread clears it when finished, so we look
3012		 * for any threads that still have a non-NULL vcore ptr.
3013		 */
3014 for (i = 1; i < n_threads; ++i)
3015 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
3016 break;
3017 if (i == n_threads) {
3018 HMT_medium();
3019 return;
3020 }
3021 HMT_low();
3022 }
3023 HMT_medium();
3024 for (i = 1; i < n_threads; ++i)
3025 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore)
3026 pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
3027}
3028
3029/*
3030 * Check that we are on thread 0 and that any other threads in
3031 * this core are off-line.  Then grab the threads so they can't
3032 * enter the kernel.
3033 */
3034static int on_primary_thread(void)
3035{
3036 int cpu = smp_processor_id();
3037 int thr;
3038
3039	/* Are we on a primary subcore? */
3040 if (cpu_thread_in_subcore(cpu))
3041 return 0;
3042
3043 thr = 0;
3044 while (++thr < threads_per_subcore)
3045 if (cpu_online(cpu + thr))
3046 return 0;
3047
3048	/* Grab all hw threads so they can't go into the kernel */
3049 for (thr = 1; thr < threads_per_subcore; ++thr) {
3050 if (kvmppc_grab_hwthread(cpu + thr)) {
3051			/* Couldn't grab one; let the others go */
3052 do {
3053 kvmppc_release_hwthread(cpu + thr);
3054 } while (--thr > 0);
3055 return 0;
3056 }
3057 }
3058 return 1;
3059}
3060
3061/*
3062 * A list of virtual cores for each physical CPU.
3063 * These are vcores that could run but their runner VCPU tasks are
3064 * (or may be) preempted.
3065 */
3066struct preempted_vcore_list {
3067 struct list_head list;
3068 spinlock_t lock;
3069};
3070
3071static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
3072
3073static void init_vcore_lists(void)
3074{
3075 int cpu;
3076
3077 for_each_possible_cpu(cpu) {
3078 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
3079 spin_lock_init(&lp->lock);
3080 INIT_LIST_HEAD(&lp->list);
3081 }
3082}
3083
3084static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
3085{
3086 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
3087
3088 vc->vcore_state = VCORE_PREEMPT;
3089 vc->pcpu = smp_processor_id();
3090 if (vc->num_threads < threads_per_vcore(vc->kvm)) {
3091 spin_lock(&lp->lock);
3092 list_add_tail(&vc->preempt_list, &lp->list);
3093 spin_unlock(&lp->lock);
3094 }
3095
3096	/* Start accumulating stolen time */
3097 kvmppc_core_start_stolen(vc);
3098}
3099
3100static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
3101{
3102 struct preempted_vcore_list *lp;
3103
3104 kvmppc_core_end_stolen(vc);
3105 if (!list_empty(&vc->preempt_list)) {
3106 lp = &per_cpu(preempted_vcores, vc->pcpu);
3107 spin_lock(&lp->lock);
3108 list_del_init(&vc->preempt_list);
3109 spin_unlock(&lp->lock);
3110 }
3111 vc->vcore_state = VCORE_INACTIVE;
3112}
3113
3114/*
3115 * This stores information about the virtual cores currently
3116 * assigned to a physical core.
3117 */
3118struct core_info {
3119 int n_subcores;
3120 int max_subcore_threads;
3121 int total_threads;
3122 int subcore_threads[MAX_SUBCORES];
3123 struct kvmppc_vcore *vc[MAX_SUBCORES];
3124};
3125
3126/*
3127 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
3128 * respectively in 2-way micro-threading (split-core) mode.
3129 */
3130static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
3131
3132static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
3133{
3134 memset(cip, 0, sizeof(*cip));
3135 cip->n_subcores = 1;
3136 cip->max_subcore_threads = vc->num_threads;
3137 cip->total_threads = vc->num_threads;
3138 cip->subcore_threads[0] = vc->num_threads;
3139 cip->vc[0] = vc;
3140}
3141
3142static bool subcore_config_ok(int n_subcores, int n_threads)
3143{
3144	/*
3145	 * POWER9 "SMT4" cores are permanently in what is effectively a
3146	 * 4-way split-core mode, with one thread per subcore.
3147	 */
3148 if (cpu_has_feature(CPU_FTR_ARCH_300))
3149 return n_subcores <= 4 && n_threads == 1;
3150
3151	/* On POWER8, can only dynamically split if unsplit to begin with */
3152 if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
3153 return false;
3154 if (n_subcores > MAX_SUBCORES)
3155 return false;
3156 if (n_subcores > 1) {
3157 if (!(dynamic_mt_modes & 2))
3158 n_subcores = 4;
3159 if (n_subcores > 2 && !(dynamic_mt_modes & 4))
3160 return false;
3161 }
3162
3163 return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
3164}
3165
3166static void init_vcore_to_run(struct kvmppc_vcore *vc)
3167{
3168 vc->entry_exit_map = 0;
3169 vc->in_guest = 0;
3170 vc->napping_threads = 0;
3171 vc->conferring_threads = 0;
3172 vc->tb_offset_applied = 0;
3173}
3174
3175static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
3176{
3177 int n_threads = vc->num_threads;
3178 int sub;
3179
3180 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
3181 return false;
3182
3183	/* In one_vm_per_core mode, require all vcores to be from the same VM */
3184 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
3185 return false;
3186
3187 if (n_threads < cip->max_subcore_threads)
3188 n_threads = cip->max_subcore_threads;
3189 if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
3190 return false;
3191 cip->max_subcore_threads = n_threads;
3192
3193 sub = cip->n_subcores;
3194 ++cip->n_subcores;
3195 cip->total_threads += vc->num_threads;
3196 cip->subcore_threads[sub] = vc->num_threads;
3197 cip->vc[sub] = vc;
3198 init_vcore_to_run(vc);
3199 list_del_init(&vc->preempt_list);
3200
3201 return true;
3202}
3203
3204/*
3205 * Work out whether it is possible to piggyback the execution of
3206 * vcore *pvc onto the execution of the other vcores described in *cip.
3207 */
3208static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
3209 int target_threads)
3210{
3211 if (cip->total_threads + pvc->num_threads > target_threads)
3212 return false;
3213
3214 return can_dynamic_split(pvc, cip);
3215}
3216
3217static void prepare_threads(struct kvmppc_vcore *vc)
3218{
3219 int i;
3220 struct kvm_vcpu *vcpu;
3221
3222 for_each_runnable_thread(i, vcpu, vc) {
3223 if (signal_pending(vcpu->arch.run_task))
3224 vcpu->arch.ret = -EINTR;
3225 else if (vcpu->arch.vpa.update_pending ||
3226 vcpu->arch.slb_shadow.update_pending ||
3227 vcpu->arch.dtl.update_pending)
3228 vcpu->arch.ret = RESUME_GUEST;
3229 else
3230 continue;
3231 kvmppc_remove_runnable(vc, vcpu);
3232 wake_up(&vcpu->arch.cpu_run);
3233 }
3234}
3235
3236static void collect_piggybacks(struct core_info *cip, int target_threads)
3237{
3238 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
3239 struct kvmppc_vcore *pvc, *vcnext;
3240
3241 spin_lock(&lp->lock);
3242 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
3243 if (!spin_trylock(&pvc->lock))
3244 continue;
3245 prepare_threads(pvc);
3246 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
3247 list_del_init(&pvc->preempt_list);
3248 if (pvc->runner == NULL) {
3249 pvc->vcore_state = VCORE_INACTIVE;
3250 kvmppc_core_end_stolen(pvc);
3251 }
3252 spin_unlock(&pvc->lock);
3253 continue;
3254 }
3255 if (!can_piggyback(pvc, cip, target_threads)) {
3256 spin_unlock(&pvc->lock);
3257 continue;
3258 }
3259 kvmppc_core_end_stolen(pvc);
3260 pvc->vcore_state = VCORE_PIGGYBACK;
3261 if (cip->total_threads >= target_threads)
3262 break;
3263 }
3264 spin_unlock(&lp->lock);
3265}
3266
3267static bool recheck_signals_and_mmu(struct core_info *cip)
3268{
3269 int sub, i;
3270 struct kvm_vcpu *vcpu;
3271 struct kvmppc_vcore *vc;
3272
3273 for (sub = 0; sub < cip->n_subcores; ++sub) {
3274 vc = cip->vc[sub];
3275 if (!vc->kvm->arch.mmu_ready)
3276 return true;
3277 for_each_runnable_thread(i, vcpu, vc)
3278 if (signal_pending(vcpu->arch.run_task))
3279 return true;
3280 }
3281 return false;
3282}
3283
3284static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
3285{
3286 int still_running = 0, i;
3287 u64 now;
3288 long ret;
3289 struct kvm_vcpu *vcpu;
3290
3291 spin_lock(&vc->lock);
3292 now = get_tb();
3293 for_each_runnable_thread(i, vcpu, vc) {
3294		/*
3295		 * It's safe to unlock the vcore in the loop here, because
3296		 * for_each_runnable_thread() is safe against removal of
3297		 * the vcpu, and the vcore state is VCORE_EXITING here,
3298		 * so any vcpus becoming runnable will have their arch.trap
3299		 * set to zero and can't actually run in the guest.
3300		 */
3301 spin_unlock(&vc->lock);
3302		/* cancel pending dec exception if dec is positive */
3303 if (now < vcpu->arch.dec_expires &&
3304 kvmppc_core_pending_dec(vcpu))
3305 kvmppc_core_dequeue_dec(vcpu);
3306
3307 trace_kvm_guest_exit(vcpu);
3308
3309 ret = RESUME_GUEST;
3310 if (vcpu->arch.trap)
3311 ret = kvmppc_handle_exit_hv(vcpu,
3312 vcpu->arch.run_task);
3313
3314 vcpu->arch.ret = ret;
3315 vcpu->arch.trap = 0;
3316
3317 spin_lock(&vc->lock);
3318 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
3319 if (vcpu->arch.pending_exceptions)
3320 kvmppc_core_prepare_to_enter(vcpu);
3321 if (vcpu->arch.ceded)
3322 kvmppc_set_timer(vcpu);
3323 else
3324 ++still_running;
3325 } else {
3326 kvmppc_remove_runnable(vc, vcpu);
3327 wake_up(&vcpu->arch.cpu_run);
3328 }
3329 }
3330 if (!is_master) {
3331 if (still_running > 0) {
3332 kvmppc_vcore_preempt(vc);
3333 } else if (vc->runner) {
3334 vc->vcore_state = VCORE_PREEMPT;
3335 kvmppc_core_start_stolen(vc);
3336 } else {
3337 vc->vcore_state = VCORE_INACTIVE;
3338 }
3339 if (vc->n_runnable > 0 && vc->runner == NULL) {
3340			/* make sure there's a candidate runner awake */
3341 i = -1;
3342 vcpu = next_runnable_thread(vc, &i);
3343 wake_up(&vcpu->arch.cpu_run);
3344 }
3345 }
3346 spin_unlock(&vc->lock);
3347}
3348
3349/*
3350 * Clear core from the list of active host cores as we are about to
3351 * enter the guest.  Only do this if it is the primary thread of the
3352 * core (not if a subcore) that is entering the guest.
3353 */
3354static inline int kvmppc_clear_host_core(unsigned int cpu)
3355{
3356 int core;
3357
3358 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3359 return 0;
3360
3361	/*
3362	 * No barrier needed here: the smp_wmb() in kvmppc_start_thread
3363	 * makes this state visible to other CPUs before guest entry.
3364	 */
3365 core = cpu >> threads_shift;
3366 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
3367 return 0;
3368}
3369
3370/*
3371 * Advertise this core as an online host core since we exited the
3372 * guest.  Only need to do this if it is the primary thread of the
3373 * core that is exiting.
3374 */
3375static inline int kvmppc_set_host_core(unsigned int cpu)
3376{
3377 int core;
3378
3379 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3380 return 0;
3381	/*
3382	 * Memory barrier can be omitted here because we do a
3383	 * spin_unlock immediately after this which provides the
3384	 * memory barrier.
3385	 */
3386 core = cpu >> threads_shift;
3387 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
3388 return 0;
3389}
3390
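/*
 * Record in the PACA which host interrupt caused the exit from the
 * guest, so that the host replays it once interrupts are re-enabled;
 * a system reset is replayed immediately.
 */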
3391static void set_irq_happened(int trap)
3392{
3393 switch (trap) {
3394 case BOOK3S_INTERRUPT_EXTERNAL:
3395 local_paca->irq_happened |= PACA_IRQ_EE;
3396 break;
3397 case BOOK3S_INTERRUPT_H_DOORBELL:
3398 local_paca->irq_happened |= PACA_IRQ_DBELL;
3399 break;
3400 case BOOK3S_INTERRUPT_HMI:
3401 local_paca->irq_happened |= PACA_IRQ_HMI;
3402 break;
3403 case BOOK3S_INTERRUPT_SYSTEM_RESET:
3404 replay_system_reset();
3405 break;
3406 }
3407}
3408
3409/*
3410 * Run a set of guest threads on a physical core.
3411 * Called with vc->lock held.
3412 */
3413static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
3414{
3415 struct kvm_vcpu *vcpu;
3416 int i;
3417 int srcu_idx;
3418 struct core_info core_info;
3419 struct kvmppc_vcore *pvc;
3420 struct kvm_split_mode split_info, *sip;
3421 int split, subcore_size, active;
3422 int sub;
3423 bool thr0_done;
3424 unsigned long cmd_bit, stat_bit;
3425 int pcpu, thr;
3426 int target_threads;
3427 int controlled_threads;
3428 int trap;
3429 bool is_power8;
3430
3431 if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
3432 return;
3433
3434	/*
3435	 * Remove from the list any threads that have a signal pending
3436	 * or need a VPA update done.
3437	 */
3438 prepare_threads(vc);
3439
3440	/* if the runner is no longer runnable, let the caller pick a new one */
3441 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3442 return;
3443
3444	/*
3445	 * Initialize *vc.
3446	 */
3447 init_vcore_to_run(vc);
3448 vc->preempt_tb = TB_NIL;
3449
3450	/*
3451	 * Number of threads that we will be controlling: the same as
3452	 * the number of threads per subcore, except on POWER9,
3453	 * where it's 1 because the threads are (mostly) independent.
3454	 */
3455 controlled_threads = threads_per_vcore(vc->kvm);
3456
3457	/*
3458	 * Make sure we are running on primary threads, and that secondary
3459	 * threads are offline.  Also check if the number of threads in this
3460	 * guest is greater than the current system threads per guest.
3461	 */
3462 if ((controlled_threads > 1) &&
3463 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
3464 for_each_runnable_thread(i, vcpu, vc) {
3465 vcpu->arch.ret = -EBUSY;
3466 kvmppc_remove_runnable(vc, vcpu);
3467 wake_up(&vcpu->arch.cpu_run);
3468 }
3469 goto out;
3470 }
3471
3472	/*
3473	 * See if we could run any other vcores on the physical core
3474	 * along with this one.
3475	 */
3476 init_core_info(&core_info, vc);
3477 pcpu = smp_processor_id();
3478 target_threads = controlled_threads;
3479 if (target_smt_mode && target_smt_mode < target_threads)
3480 target_threads = target_smt_mode;
3481 if (vc->num_threads < target_threads)
3482 collect_piggybacks(&core_info, target_threads);
3483
3484	/*
3485	 * Hard-disable interrupts, and check resched flag and signals.
3486	 * If we need to reschedule or deliver a signal, clean up
3487	 * and return without going into the guest(s).
3488	 * If the mmu_ready flag has been cleared, don't go into the
3489	 * guest because that means a HPT resize operation is in progress.
3490	 */
3491 local_irq_disable();
3492 hard_irq_disable();
3493 if (lazy_irq_pending() || need_resched() ||
3494 recheck_signals_and_mmu(&core_info)) {
3495 local_irq_enable();
3496 vc->vcore_state = VCORE_INACTIVE;
3497		/* Unlock all except the primary vcore */
3498 for (sub = 1; sub < core_info.n_subcores; ++sub) {
3499 pvc = core_info.vc[sub];
3500			/* Put back on to the preempted vcores list */
3501 kvmppc_vcore_preempt(pvc);
3502 spin_unlock(&pvc->lock);
3503 }
3504 for (i = 0; i < controlled_threads; ++i)
3505 kvmppc_release_hwthread(pcpu + i);
3506 return;
3507 }
3508
3509 kvmppc_clear_host_core(pcpu);
3510
3511	/* Decide on micro-threading (split-core) mode */
3512 subcore_size = threads_per_subcore;
3513 cmd_bit = stat_bit = 0;
3514 split = core_info.n_subcores;
3515 sip = NULL;
3516 is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
3517
3518 if (split > 1) {
3519 sip = &split_info;
3520 memset(&split_info, 0, sizeof(split_info));
3521 for (sub = 0; sub < core_info.n_subcores; ++sub)
3522 split_info.vc[sub] = core_info.vc[sub];
3523
3524 if (is_power8) {
3525 if (split == 2 && (dynamic_mt_modes & 2)) {
3526 cmd_bit = HID0_POWER8_1TO2LPAR;
3527 stat_bit = HID0_POWER8_2LPARMODE;
3528 } else {
3529 split = 4;
3530 cmd_bit = HID0_POWER8_1TO4LPAR;
3531 stat_bit = HID0_POWER8_4LPARMODE;
3532 }
3533 subcore_size = MAX_SMT_THREADS / split;
3534 split_info.rpr = mfspr(SPRN_RPR);
3535 split_info.pmmar = mfspr(SPRN_PMMAR);
3536 split_info.ldbar = mfspr(SPRN_LDBAR);
3537 split_info.subcore_size = subcore_size;
3538 } else {
3539 split_info.subcore_size = 1;
3540 }
3541
3542		/* order writes to split_info before kvm_split_mode pointer */
3543 smp_wmb();
3544 }
3545
3546 for (thr = 0; thr < controlled_threads; ++thr) {
3547 struct paca_struct *paca = paca_ptrs[pcpu + thr];
3548
3549 paca->kvm_hstate.napping = 0;
3550 paca->kvm_hstate.kvm_split_mode = sip;
3551 }
3552
3553	/* Initiate micro-threading (split-core) on POWER8 if required */
3554 if (cmd_bit) {
3555 unsigned long hid0 = mfspr(SPRN_HID0);
3556
3557 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
3558 mb();
3559 mtspr(SPRN_HID0, hid0);
3560 isync();
3561 for (;;) {
3562 hid0 = mfspr(SPRN_HID0);
3563 if (hid0 & stat_bit)
3564 break;
3565 cpu_relax();
3566 }
3567 }
3568
3569	/*
3570	 * On POWER8, set RWMR register.
3571	 * Since it only affects PURR and SPURR, it doesn't affect
3572	 * the host, so we don't save/restore the host value.
3573	 */
3574 if (is_power8) {
3575 unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
3576 int n_online = atomic_read(&vc->online_count);
3577
3578		/*
3579		 * Use the 8-thread value if we're doing split-core
3580		 * or if the vcore's online count looks bogus.
3581		 */
3582 if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
3583 n_online >= 1 && n_online <= MAX_SMT_THREADS)
3584 rwmr_val = p8_rwmr_values[n_online];
3585 mtspr(SPRN_RWMR, rwmr_val);
3586 }
3587
3588	/* Start all the threads */
3589 active = 0;
3590 for (sub = 0; sub < core_info.n_subcores; ++sub) {
3591 thr = is_power8 ? subcore_thread_map[sub] : sub;
3592 thr0_done = false;
3593 active |= 1 << thr;
3594 pvc = core_info.vc[sub];
3595 pvc->pcpu = pcpu + thr;
3596 for_each_runnable_thread(i, vcpu, pvc) {
3597 kvmppc_start_thread(vcpu, pvc);
3598 kvmppc_create_dtl_entry(vcpu, pvc);
3599 trace_kvm_guest_enter(vcpu);
3600 if (!vcpu->arch.ptid)
3601 thr0_done = true;
3602 active |= 1 << (thr + vcpu->arch.ptid);
3603 }
3604		/*
3605		 * We need to start the first thread of each subcore
3606		 * even if it doesn't have a vcpu.
3607		 */
3608 if (!thr0_done)
3609 kvmppc_start_thread(NULL, pvc);
3610 }
3611
3612	/*
3613	 * Ensure that split_info.do_nap is set after setting the vcore
3614	 * pointers in the PACAs of the secondary threads.
3615	 */
3616 smp_mb();
3617
3618	/*
3619	 * When doing micro-threading, poke the inactive threads as well.
3620	 * This gets them to the nap instruction after kvm_do_nap,
3621	 * which reduces the time taken to unsplit later.
3622	 */
3623 if (cmd_bit) {
3624 split_info.do_nap = 1;
3625 for (thr = 1; thr < threads_per_subcore; ++thr)
3626 if (!(active & (1 << thr)))
3627 kvmppc_ipi_thread(pcpu + thr);
3628 }
3629
3630 vc->vcore_state = VCORE_RUNNING;
3631 preempt_disable();
3632
3633 trace_kvmppc_run_core(vc, 0);
3634
3635 for (sub = 0; sub < core_info.n_subcores; ++sub)
3636 spin_unlock(&core_info.vc[sub]->lock);
3637
3638 guest_enter_irqoff();
3639
3640 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
3641
3642 this_cpu_disable_ftrace();
3643
3644	/*
3645	 * Interrupts will be enabled once we get into the guest,
3646	 * so tell lockdep that we're about to enable interrupts.
3647	 */
3648 trace_hardirqs_on();
3649
3650 trap = __kvmppc_vcore_entry();
3651
3652 trace_hardirqs_off();
3653
3654 this_cpu_enable_ftrace();
3655
3656 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
3657
3658 set_irq_happened(trap);
3659
3660 spin_lock(&vc->lock);
3661
3662 vc->vcore_state = VCORE_EXITING;
3663
3664	/* wait for secondary threads to finish writing their state to memory */
3665 kvmppc_wait_for_nap(controlled_threads);
3666
3667	/* Return to whole-core mode if we split the core earlier */
3668 if (cmd_bit) {
3669 unsigned long hid0 = mfspr(SPRN_HID0);
3670 unsigned long loops = 0;
3671
3672 hid0 &= ~HID0_POWER8_DYNLPARDIS;
3673 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
3674 mb();
3675 mtspr(SPRN_HID0, hid0);
3676 isync();
3677 for (;;) {
3678 hid0 = mfspr(SPRN_HID0);
3679 if (!(hid0 & stat_bit))
3680 break;
3681 cpu_relax();
3682 ++loops;
3683 }
3684 split_info.do_nap = 0;
3685 }
3686
3687 kvmppc_set_host_core(pcpu);
3688
3689 guest_exit_irqoff();
3690
3691 local_irq_enable();
3692
3693	/* Let secondaries go back to the offline loop */
3694 for (i = 0; i < controlled_threads; ++i) {
3695 kvmppc_release_hwthread(pcpu + i);
3696 if (sip && sip->napped[i])
3697 kvmppc_ipi_thread(pcpu + i);
3698 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
3699 }
3700
3701 spin_unlock(&vc->lock);
3702
3703	/* make sure updates to secondary vcpu structs are visible now */
3704 smp_mb();
3705
3706 preempt_enable();
3707
3708 for (sub = 0; sub < core_info.n_subcores; ++sub) {
3709 pvc = core_info.vc[sub];
3710 post_guest_process(pvc, pvc == vc);
3711 }
3712
3713 spin_lock(&vc->lock);
3714
3715 out:
3716 vc->vcore_state = VCORE_INACTIVE;
3717 trace_kvmppc_run_core(vc, 1);
3718}
3719
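/*
 * Load guest values into the SPRs handled by the P9 entry path.  Any
 * SPR that must be written with MSR[RI] clear is dealt with later.
 */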
3720static void load_spr_state(struct kvm_vcpu *vcpu)
3721{
3722 mtspr(SPRN_DSCR, vcpu->arch.dscr);
3723 mtspr(SPRN_IAMR, vcpu->arch.iamr);
3724 mtspr(SPRN_PSPB, vcpu->arch.pspb);
3725 mtspr(SPRN_FSCR, vcpu->arch.fscr);
3726 mtspr(SPRN_TAR, vcpu->arch.tar);
3727 mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
3728 mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
3729 mtspr(SPRN_BESCR, vcpu->arch.bescr);
3730 mtspr(SPRN_WORT, vcpu->arch.wort);
3731 mtspr(SPRN_TIDR, vcpu->arch.tid);
3732 mtspr(SPRN_AMR, vcpu->arch.amr);
3733 mtspr(SPRN_UAMOR, vcpu->arch.uamor);
3734
3735	/*
3736	 * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
3737	 * clear (or hstate set appropriately to catch those registers
3738	 * being clobbered if we take a MCE or SRESET), so those are done
3739	 * later.
3740	 */
3741
3742 if (!(vcpu->arch.ctrl & 1))
3743 mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
3744}
3745
3746static void store_spr_state(struct kvm_vcpu *vcpu)
3747{
3748 vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
3749
3750 vcpu->arch.iamr = mfspr(SPRN_IAMR);
3751 vcpu->arch.pspb = mfspr(SPRN_PSPB);
3752 vcpu->arch.fscr = mfspr(SPRN_FSCR);
3753 vcpu->arch.tar = mfspr(SPRN_TAR);
3754 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
3755 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
3756 vcpu->arch.bescr = mfspr(SPRN_BESCR);
3757 vcpu->arch.wort = mfspr(SPRN_WORT);
3758 vcpu->arch.tid = mfspr(SPRN_TIDR);
3759 vcpu->arch.amr = mfspr(SPRN_AMR);
3760 vcpu->arch.uamor = mfspr(SPRN_UAMOR);
3761 vcpu->arch.dscr = mfspr(SPRN_DSCR);
3762}
3763
3764/*
3765 * Privileged (non-hypervisor) host registers to save.
3766 */
3767struct p9_host_os_sprs {
3768 unsigned long dscr;
3769 unsigned long tidr;
3770 unsigned long iamr;
3771 unsigned long amr;
3772 unsigned long fscr;
3773};
3774
3775static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
3776{
3777 host_os_sprs->dscr = mfspr(SPRN_DSCR);
3778 host_os_sprs->tidr = mfspr(SPRN_TIDR);
3779 host_os_sprs->iamr = mfspr(SPRN_IAMR);
3780 host_os_sprs->amr = mfspr(SPRN_AMR);
3781 host_os_sprs->fscr = mfspr(SPRN_FSCR);
3782}
3783
3784/* vcpu guest regs must already be saved */
3785static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
3786 struct p9_host_os_sprs *host_os_sprs)
3787{
3788 mtspr(SPRN_PSPB, 0);
3789 mtspr(SPRN_WORT, 0);
3790 mtspr(SPRN_UAMOR, 0);
3791
3792 mtspr(SPRN_DSCR, host_os_sprs->dscr);
3793 mtspr(SPRN_TIDR, host_os_sprs->tidr);
3794 mtspr(SPRN_IAMR, host_os_sprs->iamr);
3795
3796 if (host_os_sprs->amr != vcpu->arch.amr)
3797 mtspr(SPRN_AMR, host_os_sprs->amr);
3798
3799 if (host_os_sprs->fscr != vcpu->arch.fscr)
3800 mtspr(SPRN_FSCR, host_os_sprs->fscr);
3801
3802	/* Turn the host's run latch back on if the guest ran with it off */
3803 if (!(vcpu->arch.ctrl & 1))
3804 mtspr(SPRN_CTRLT, 1);
3805}
3806
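/*
 * The XICS hcalls that can be completed in the host via
 * kvmppc_xive_xics_hcall() before the vcpu's XIVE context is pulled.
 */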
3807static inline bool hcall_is_xics(unsigned long req)
3808{
3809 return req == H_EOI || req == H_CPPR || req == H_IPI ||
3810 req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
3811}
3812
3813/*
3814 * Guest entry for POWER9 and later CPUs.
3815 */
3816static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
3817 unsigned long lpcr)
3818{
3819 struct kvmppc_vcore *vc = vcpu->arch.vcore;
3820 struct p9_host_os_sprs host_os_sprs;
3821 s64 dec;
3822 u64 tb;
3823 int trap, save_pmu;
3824
3825 WARN_ON_ONCE(vcpu->arch.ceded);
3826
3827 dec = mfspr(SPRN_DEC);
3828 tb = mftb();
3829 if (dec < 0)
3830 return BOOK3S_INTERRUPT_HV_DECREMENTER;
3831 local_paca->kvm_hstate.dec_expires = dec + tb;
3832 if (local_paca->kvm_hstate.dec_expires < time_limit)
3833 time_limit = local_paca->kvm_hstate.dec_expires;
3834
3835 save_p9_host_os_sprs(&host_os_sprs);
3836
3837 kvmhv_save_host_pmu();
3838
3839 kvmppc_subcore_enter_guest();
3840
3841 vc->entry_exit_map = 1;
3842 vc->in_guest = 1;
3843
3844 if (vcpu->arch.vpa.pinned_addr) {
3845 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3846 u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3847 lp->yield_count = cpu_to_be32(yield_count);
3848 vcpu->arch.vpa.dirty = 1;
3849 }
3850
3851 if (cpu_has_feature(CPU_FTR_TM) ||
3852 cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3853 kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3854
3855 kvmhv_load_guest_pmu(vcpu);
3856
3857 msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3858 load_fp_state(&vcpu->arch.fp);
3859#ifdef CONFIG_ALTIVEC
3860 load_vr_state(&vcpu->arch.vr);
3861#endif
3862 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
3863
3864 load_spr_state(vcpu);
3865
3866	/*
3867	 * When setting DEC, we must always consider irq_work_raise via
3868	 * NMI racing with setting DEC: if an NMI hits right as we switch
3869	 * into guest mode, sets pending work and sets DEC, that DEC value
3870	 * will apply to the guest and not bring us back to the host.
3871	 *
3872	 * irq_work_raise could check a flag (or possibly LPCR[HDICE]) and
3873	 * set HDEC to 1?  That wouldn't solve the nested hv case which
3874	 * needs to abort the hcall or zero the time limit.
3875	 *
3876	 * XXX: Another day's problem.
3877	 */
3878 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
3879
3880 if (kvmhv_on_pseries()) {
3881		/*
3882		 * We need to save and restore the guest visible part of the
3883		 * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
3884		 * doesn't do this for us.  Note this is only required on
3885		 * pseries; otherwise it is done in kvmhv_vcpu_entry_p9().
3886		 */
3887 unsigned long host_psscr;
3888		/* call our hypervisor to load up HV regs and go */
3889 struct hv_guest_state hvregs;
3890
3891 host_psscr = mfspr(SPRN_PSSCR_PR);
3892 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
3893 kvmhv_save_hv_regs(vcpu, &hvregs);
3894 hvregs.lpcr = lpcr;
3895 vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
3896 hvregs.version = HV_GUEST_STATE_VERSION;
3897 if (vcpu->arch.nested) {
3898 hvregs.lpid = vcpu->arch.nested->shadow_lpid;
3899 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
3900 } else {
3901 hvregs.lpid = vcpu->kvm->arch.lpid;
3902 hvregs.vcpu_token = vcpu->vcpu_id;
3903 }
3904 hvregs.hdec_expiry = time_limit;
3905 mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
3906 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
3907 trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
3908 __pa(&vcpu->arch.regs));
3909 kvmhv_restore_hv_return_state(vcpu, &hvregs);
3910 vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
3911 vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
3912 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
3913 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
3914 mtspr(SPRN_PSSCR_PR, host_psscr);
3915
3916		/* H_CEDE hcall is handled below in virtual mode */
3917 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3918 kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
3919 kvmppc_cede(vcpu);
3920 kvmppc_set_gpr(vcpu, 3, 0);
3921 trap = 0;
3922 }
3923 } else {
3924 kvmppc_xive_push_vcpu(vcpu);
3925 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
3926 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3927 !(vcpu->arch.shregs.msr & MSR_PR)) {
3928 unsigned long req = kvmppc_get_gpr(vcpu, 3);
3929
3930			/* H_CEDE */
3931 if (req == H_CEDE) {
3932 kvmppc_cede(vcpu);
3933 kvmppc_xive_rearm_escalation(vcpu);
3934 kvmppc_set_gpr(vcpu, 3, 0);
3935 trap = 0;
3936
3937			/* XICS hcalls must be handled before xive is pulled */
3938 } else if (hcall_is_xics(req)) {
3939 int ret;
3940
3941 ret = kvmppc_xive_xics_hcall(vcpu, req);
3942 if (ret != H_TOO_HARD) {
3943 kvmppc_set_gpr(vcpu, 3, ret);
3944 trap = 0;
3945 }
3946 }
3947 }
3948 kvmppc_xive_pull_vcpu(vcpu);
3949
3950 if (kvm_is_radix(vcpu->kvm))
3951 vcpu->arch.slb_max = 0;
3952 }
3953
3954 dec = mfspr(SPRN_DEC);
3955 if (!(lpcr & LPCR_LD))
3956 dec = (s32) dec;
3957 tb = mftb();
3958 vcpu->arch.dec_expires = dec + tb;
3959 vcpu->cpu = -1;
3960 vcpu->arch.thread_cpu = -1;
3961
3962 store_spr_state(vcpu);
3963
3964 restore_p9_host_os_sprs(vcpu, &host_os_sprs);
3965
3966 msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
3967 store_fp_state(&vcpu->arch.fp);
3968#ifdef CONFIG_ALTIVEC
3969 store_vr_state(&vcpu->arch.vr);
3970#endif
3971 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
3972
3973 if (cpu_has_feature(CPU_FTR_TM) ||
3974 cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
3975 kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3976
3977 save_pmu = 1;
3978 if (vcpu->arch.vpa.pinned_addr) {
3979 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3980 u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
3981 lp->yield_count = cpu_to_be32(yield_count);
3982 vcpu->arch.vpa.dirty = 1;
3983 save_pmu = lp->pmcregs_in_use;
3984 }
3985	/* Must save pmu if this guest is capable of running nested guests */
3986 save_pmu |= nesting_enabled(vcpu->kvm);
3987
3988 kvmhv_save_guest_pmu(vcpu, save_pmu);
3989
3990 vc->entry_exit_map = 0x101;
3991 vc->in_guest = 0;
3992
3993 mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
3994	/* We may have raced with new irq work */
3995 if (test_irq_work_pending())
3996 set_dec(1);
3997 mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
3998
3999 kvmhv_load_host_pmu();
4000
4001 kvmppc_subcore_exit_guest();
4002
4003 return trap;
4004}
4005
4006/*
4007 * Wait for some other vcpu thread to execute us, and
4008 * wake us up when we need to handle something in the host.
4009 */
4010static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
4011 struct kvm_vcpu *vcpu, int wait_state)
4012{
4013 DEFINE_WAIT(wait);
4014
4015 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
4016 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4017 spin_unlock(&vc->lock);
4018 schedule();
4019 spin_lock(&vc->lock);
4020 }
4021 finish_wait(&vcpu->arch.cpu_run, &wait);
4022}
4023
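/*
 * Scale the vcore's halt-polling interval up by the halt_poll_ns_grow
 * factor, starting from halt_poll_ns_grow_start.
 */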
4024static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
4025{
4026 if (!halt_poll_ns_grow)
4027 return;
4028
4029 vc->halt_poll_ns *= halt_poll_ns_grow;
4030 if (vc->halt_poll_ns < halt_poll_ns_grow_start)
4031 vc->halt_poll_ns = halt_poll_ns_grow_start;
4032}
4033
4034static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
4035{
4036 if (halt_poll_ns_shrink == 0)
4037 vc->halt_poll_ns = 0;
4038 else
4039 vc->halt_poll_ns /= halt_poll_ns_shrink;
4040}
4041
4042#ifdef CONFIG_KVM_XICS
4043static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
4044{
4045 if (!xics_on_xive())
4046 return false;
4047 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
4048 vcpu->arch.xive_saved_state.cppr;
4049}
4050#else
4051static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
4052{
4053 return false;
4054}
4055#endif
4056
4057static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
4058{
4059 if (vcpu->arch.pending_exceptions