#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
#include <linux/pgtable.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq, bool check_resend);
static int xics_opal_set_server(unsigned int hw_irq, int server_cpu);

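/* Re-deliver any interrupts on this ICS that are flagged for resend. */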
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend)
			icp_rm_deliver_irq(xics, icp, state->number, true);
	}
}

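/*
 * Post a PPC_MSG_RM_HOST_ACTION IPI to the first thread of host core
 * @hcore so that it performs the queued real-mode action on @vcpu.
 */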
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	kvmppc_set_host_ipi(hcpu);
	smp_mb();
	kvmhv_rm_send_ipi(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

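/*
 * Scan host cores (start + 1 .. max - 1) for one that is in the host and
 * has no real-mode action pending, and claim it by atomically setting
 * rm_action to @action.  Returns the core number, or -1 if none was found.
 */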
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
					old.raw, new.raw) == old.raw;
		if (success) {
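			/*
			 * Make the rm_action store visible before the caller
			 * stores rm_data and sends the IPI.
			 */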
			smp_wmb();
			return core;
		}
	}

	return -1;
}

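/*
 * Starting from this CPU's own core, search the other host cores for one
 * that can take @action, wrapping around once if necessary.
 */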
static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

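/*
 * Mark @vcpu as having a pending external interrupt and kick it from real
 * mode: set MER if it is this vCPU, send an IPI to the physical thread it
 * is running on, or fall back to a host core or the host exit path.
 */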
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);

	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

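	/*
	 * If the target vCPU is not currently loaded on a physical thread,
	 * redirect the kick to an available host core if possible, otherwise
	 * queue it as a real-mode action for the host to handle on exit.
	 */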
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

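/* Drop the pending external interrupt and clear MER.  Only called on self. */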
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

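/*
 * Attempt to atomically update the ICP state from @old to @new, recomputing
 * the EE (external exception) output bit.  Returns false if the state
 * changed under us and the caller must retry.
 */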
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

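	/*
	 * The update went through; if the output (EE) line is now asserted,
	 * kick the target vCPU.
	 */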
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Record debug state on the calling vCPU's ICP */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

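/*
 * Walk the ICP's resend map and re-attempt delivery for every ICS that
 * has interrupts flagged for resend.
 */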
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

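/*
 * Try to record @irq at @priority as the ICP's pending interrupt.  On
 * success, *reject holds any interrupt that was displaced; on failure the
 * need_resend flag is set so the source gets retried later.
 */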
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		success = new_state.cppr > priority &&
			  new_state.mfrr > priority &&
			  new_state.pending_pri > priority;

		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

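/*
 * Deliver (or re-deliver) interrupt @new_irq to its target ICP.  If the
 * interrupt is masked it is left pending on the source; if the ICP cannot
 * take it now, the source is flagged for resend.  A lower-priority
 * interrupt displaced from the ICP is delivered in its place, iterating
 * until nothing is rejected.
 */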
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

 again:
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	arch_spin_lock(&ics->lock);

	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			xics->err_noicp++;
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	state->resend = 0;

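	/* If the interrupt is masked, just remember that it is pending. */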
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

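	/*
	 * Try to deliver to the ICP.  If it takes the interrupt, any
	 * lower-priority interrupt it was holding is rejected back to us
	 * and we retry delivery of that one.  Otherwise flag the source
	 * for resend and record the ICS in the ICP's resend map.
	 */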
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			icp->n_reject++;
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		state->resend = 1;

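		/*
		 * Make sure state->resend is visible before we set the bit
		 * in the resend map.
		 */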
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

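		/*
		 * If the ICP's need_resend flag has been cleared in the
		 * meantime, our resend map bit may be missed, so undo the
		 * resend marking and retry the delivery right away.
		 */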
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

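/*
 * Lower the ICP's current processor priority (numerically raise CPPR) to
 * @new_cppr.  This may allow a pending IPI to be presented and may require
 * resending interrupts that were previously rejected.
 */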
static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		new_state.cppr = new_cppr;

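		/*
		 * If a pending IPI (MFRR) is now more favoured than the new
		 * CPPR and at least as favoured as the pending interrupt,
		 * present the IPI instead.
		 */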
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

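	/* Lowering the priority may make rejected interrupts deliverable again. */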
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

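/*
 * Real-mode H_XIRR: accept the highest-priority pending interrupt, raise
 * CPPR to its priority, and return the XIRR (CPPR || XISR) in GPR4.
 */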
unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	icp_rm_clr_vcpu_irq(icp->vcpu);

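	/*
	 * Atomically fetch and clear the pending interrupt: CPPR becomes
	 * the priority of the interrupt we are accepting.
	 */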
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	vcpu->arch.regs.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

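/*
 * Real-mode H_IPI: update the target server's MFRR.  If the new MFRR is
 * more favoured than the CPPR and the pending interrupt, present an IPI,
 * possibly rejecting the interrupt that was pending; if the MFRR is being
 * made less favoured, also check for interrupts that need resending.
 */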
int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		  unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set the new MFRR */
		new_state.mfrr = mfrr;

		/* Check whether an IPI should be presented */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject, false);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

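/*
 * Real-mode H_CPPR: set the processor priority.  Making the priority less
 * favoured (larger CPPR) is handled by icp_rm_down_cppr(); making it more
 * favoured may reject the currently pending interrupt back to its source.
 */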
int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

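	/*
	 * The priority is being made more favoured: clear the pending EE
	 * first, then reject any pending interrupt that is no longer
	 * deliverable at the new CPPR.
	 */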
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Pass any rejected interrupt back to its source */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject, false);
	}
 bail:
	return check_too_hard(xics, icp);
}

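/*
 * Source-side EOI handling: update the interrupt's P/Q state, re-deliver
 * it if it is still presented, notify any EOI ack notifiers via the host,
 * and for passed-through interrupts re-target the hardware IRQ to the
 * core that is now handling it.
 */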
static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;

	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

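	/* Re-deliver if the interrupt is still in the presented state. */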
	if (pq_new & PQ_PRESENTED)
		icp_rm_deliver_irq(xics, NULL, irq, false);

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}

 bail:
	return check_too_hard(xics, icp);
}

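/*
 * Real-mode H_EOI: the CPPR field of the XIRR restores the processor
 * priority (handled as a down_cppr), then the source is EOI'd unless it
 * is an IPI, which needs no source-side handling.
 */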
int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	icp_rm_down_cppr(xics, icp, xirr >> 24);

	if (irq == XICS_IPI)
		return check_too_hard(xics, icp);

	return ics_rm_eoi(vcpu, irq);
}

static unsigned long eoi_rc;

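/*
 * EOI a passed-through interrupt in real mode: first EOI the MSI at the
 * PHB via OPAL, then EOI the source on the interrupt controller, either
 * through the real-mode XICS MMIO if available or an OPAL call (which may
 * ask the caller to try again).
 */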
static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
{
	void __iomem *xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (xics_phys) {
		__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
	} else {
		rc = opal_int_eoi(be32_to_cpu(xirr));
		*again = rc > 0;
	}
}

static int xics_opal_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

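/*
 * Increment a per-CPU counter from real mode.  per_cpu_ptr() may return a
 * vmalloc address, which cannot be dereferenced with the MMU off, so
 * convert it to its physical (real-mode accessible) address first.
 */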
static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (get_region_id(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

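/*
 * Account an interrupt that was handled entirely in real mode, bumping the
 * per-IRQ and per-CPU interrupt counts that the host normally updates in
 * its interrupt handlers.
 */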
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}

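/*
 * Deliver a passed-through hardware interrupt to the guest from real mode:
 * account it, update the source's P/Q state, present it to the guest ICP
 * if it just became presented, and EOI the hardware side.  The return
 * value is passed back to the real-mode interrupt entry code.
 */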
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 __be32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap,
				 bool *again)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq;
	u16 src;
	u32 pq_old, pq_new;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return 2;

	state = &ics->irq_state[src];

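	/*
	 * Record this occurrence in the source's P/Q state: the previous
	 * P bit is pushed into Q and P is set.
	 */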
	do {
		pq_old = state->pq_state;
		pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Only present the interrupt if it was not already presented or queued */
	if (pq_new == PQ_PRESENTED)
		icp_rm_deliver_irq(xics, icp, irq, false);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
		again);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

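/*
 * Perform the real-mode action that a guest CPU queued for this host core.
 * Currently the only action is kicking a vCPU.
 */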
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

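/*
 * Handle a PPC_MSG_RM_HOST_ACTION IPI on the host side: run the queued
 * action for this core, then clear rm_data before releasing rm_action so
 * the core can be claimed again.
 */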
void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);

		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}