#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};
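
/*
 * Locking order, as exercised by the code below:
 *
 *   vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *     vgic_dist->lpi_list_lock		must be taken with IRQs disabled
 *       vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * The outermost lock on any given path is taken with
 * raw_spin_lock_irqsave(); locks nested underneath it use plain
 * raw_spin_lock(), as interrupts are already disabled at that point.
 *
 * When two vcpus' ap_list_locks have to be held at the same time (see
 * vgic_prune_ap_list()), the vcpu with the lower vcpu_id is locked first,
 * and the second lock is taken with raw_spin_lock_nested().
 *
 * If a higher-ranking lock is needed while a lower one is held, the lower
 * lock is dropped first and re-taken afterwards (see the retry loop in
 * vgic_queue_irq_unlock()).
 */

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */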
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}
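
/*
 * Look up the struct vgic_irq for a given virtual INTID. For LPIs this also
 * takes a reference on the IRQ; callers must drop it with vgic_put_irq()
 * once they are done with it.
 */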
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis +
					   VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
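
/*
 * We can't do anything in here, because we lack the kvm pointer needed to
 * lock and remove the item from the lpi_list. So we keep this function empty
 * and use the return value of kref_put() to trigger the actual freeing in
 * __vgic_put_lpi_locked().
 */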
static void vgic_irq_release(struct kref *ref)
{
}
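
/*
 * Drop a reference on an LPI. The caller must already hold the
 * dist->lpi_list_lock; the last reference removes the LPI from the list
 * and frees the vgic_irq.
 */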
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (!kref_put(&irq->refcount, vgic_irq_release))
		return;

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;

	kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	__vgic_put_lpi_locked(kvm, irq);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		if (irq->intid >= VGIC_MIN_LPI) {
			raw_spin_lock(&irq->irq_lock);
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);
			vgic_put_irq(vcpu->kvm, irq);
		}
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->ops && irq->ops->get_input_level)
		return irq->ops->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}
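
/*
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * Based on the current state of the interrupt (enabled, pending, active,
 * vcpu and target_vcpu), compute the next vcpu this interrupt should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */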
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
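
/*
 * The order of items in the ap_list defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */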
static int vgic_irq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
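
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */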
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
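
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap_list.
 * Do the queuing if necessary, taking the right locks in the right order.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped. Returns true when the IRQ was queued, false
 * otherwise.
 */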
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}
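
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:    The VM structure pointer
 * @cpuid:  The CPU for private interrupts (SGIs/PPIs)
 * @intid:  The INTID to inject a new state to
 * @level:  Edge-triggered: true to trigger the interrupt, false to ignore
 *          the call; level-sensitive: true to raise the input line, false
 *          to lower it
 * @owner:  The opaque pointer to the owner of the IRQ being raised, used to
 *          verify that the caller is allowed to inject this IRQ
 *
 * Returns 0 on success or a negative error code on failure.
 */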
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to do here, the state didn't change. */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    struct irq_ops *ops)
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->ops = ops;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->ops = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, struct irq_ops *ops)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}
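
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt, e.g. when the
 * VM is reset by the kernel subsystem that injects it.
 */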
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}
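
/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */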
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}
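
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the ap_list
 *
 * @vcpu: The VCPU whose ap_list should be pruned
 *
 * Go over the list of interrupts on the VCPU's ap_list, remove those that
 * no longer need to be considered (neither pending nor active), and migrate
 * those whose target VCPU has changed.
 */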
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq() call matches the
			 * vgic_get_irq_kref() in vgic_queue_irq_unlock(),
			 * where we added the interrupt to the ap_list. As
			 * we remove it from the list, we also drop that
			 * reference.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU, nothing to do. */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * vcpu ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				     SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the interrupt
		 * around. Otherwise things have changed while the interrupt
		 * was unlocked and we need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	lockdep_assert_held(&irq->irq_lock);

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		raw_spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one LR */
		w = vgic_irq_get_lr_count(irq);
		raw_spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;
	int i = 0;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			raw_spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		raw_spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	/* Nuke remaining LRs */
	for (i = count; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_clear_lr(vcpu, i);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
	else
		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	int used_lrs;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
	else
		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;

	if (used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 *
	 * Note that we still need to go through the whole thing if anything
	 * can be directly injected (GICv4).
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	    !vgic_supports_direct_msis(vcpu->kvm))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
		vgic_flush_lr_state(vcpu);
		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
	}

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);

	if (vgic_supports_direct_msis(vcpu->kvm))
		vgic_v4_commit(vcpu);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_vmcr_sync(vcpu);
	else
		vgic_v3_vmcr_sync(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;
	struct vgic_vmcr vmcr;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	vgic_get_vmcr(vcpu, &vmcr);

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled &&
			  !irq->active &&
			  irq->priority < vmcr.pmr;
		raw_spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}