// SPDX-License-Identifier: GPL-2.0
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

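/*
 * Guest EBase is a 64-bit register when the write-gate (EBase.WG) is present;
 * the two helpers below pick the widest access that is safe on the host.
 */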
static inline long kvm_vz_read_gc0_ebase(void)
{
        if (sizeof(long) == 8 && cpu_has_ebase_wg)
                return read_gc0_ebase_64();
        else
                return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
        /*
         * First write with WG=1 to write upper bits, then write again in case
         * WG should be left at 0.
         * write_gc0_ebase_64() is no longer UNDEFINED since R6.
         */
        if (sizeof(long) == 8 &&
            (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
                write_gc0_ebase_64(v | MIPS_EBASE_WG);
                write_gc0_ebase_64(v);
        } else {
                write_gc0_ebase(v | MIPS_EBASE_WG);
                write_gc0_ebase(v);
        }
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
        /* no need to be exact */
        return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_ufr)
                        mask |= MIPS_CONF5_UFR;
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
        }

        return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
                MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config6_guest_wrmask(vcpu) |
                LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
        /* VZ guest has already converted gva to gpa */
        return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
        clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
        clear_bit(priority, &vcpu->arch.pending_exceptions);
        set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
        /*
         * timer expiry is asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
        /*
         * timer expiry is asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
                                   struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;

        /*
         * interrupts are asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
                                     struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;

        /*
         * interrupts are asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

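/*
 * The deliver/clear callbacks below transfer the deferred bits from
 * pending_exceptions{,_clr} into the guest Cause register, or into the
 * GuestCtl2 virtual interrupt pending bits when the hardware supports them.
 */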
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                                 u32 cause)
{
        u32 irq = (priority < MIPS_EXC_MAX) ?
                kvm_priority_to_irq[priority] : 0;

        switch (priority) {
        case MIPS_EXC_INT_TIMER:
                set_gc0_cause(C_TI);
                break;

        case MIPS_EXC_INT_IO_1:
        case MIPS_EXC_INT_IO_2:
        case MIPS_EXC_INT_IPI_1:
        case MIPS_EXC_INT_IPI_2:
                if (cpu_has_guestctl2)
                        set_c0_guestctl2(irq);
                else
                        set_gc0_cause(irq);
                break;

        default:
                break;
        }

        clear_bit(priority, &vcpu->arch.pending_exceptions);
        return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                               u32 cause)
{
        u32 irq = (priority < MIPS_EXC_MAX) ?
                kvm_priority_to_irq[priority] : 0;

        switch (priority) {
        case MIPS_EXC_INT_TIMER:
                /*
                 * Explicitly clear irq associated with Cause.IP[IPTI] if
                 * GuestCtl2 virtual interrupt register not supported or if
                 * not using GuestCtl2 Hardware Clear.
                 */
                if (cpu_has_guestctl2) {
                        if (!(read_c0_guestctl2() & (irq << 14)))
                                clear_c0_guestctl2(irq);
                } else {
                        clear_gc0_cause(irq);
                }
                break;

        case MIPS_EXC_INT_IO_1:
        case MIPS_EXC_INT_IO_2:
        case MIPS_EXC_INT_IPI_1:
        case MIPS_EXC_INT_IPI_2:
                /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
                if (cpu_has_guestctl2) {
                        if (!(read_c0_guestctl2() & (irq << 14)))
                                clear_c0_guestctl2(irq);
                } else {
                        clear_gc0_cause(irq);
                }
                break;

        default:
                break;
        }

        clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
        return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
        if (kvm_mips_count_disabled(vcpu))
                return false;

        /* Chosen frequency must match real frequency */
        if (mips_hpt_frequency != vcpu->arch.count_hz)
                return false;

        /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
        if (current_cpu_data.gtoffset_mask != 0xffffffff)
                return false;

        return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
                                   u32 cause)
{
        /*
         * Avoid spurious counter interrupts by setting Guest CP0_Count to just
         * after Guest CP0_Compare.
         */
        write_c0_gtoffset(compare - read_c0_count());

        back_to_back_c0_hazard();
        write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
                                   u32 compare, u32 cause)
{
        u32 start_count, after_count;
        ktime_t freeze_time;
        unsigned long flags;

        /*
         * Freeze the soft-timer and sync the guest CP0_Count with it. We do
         * this with interrupts disabled to avoid latency.
         */
        local_irq_save(flags);
        freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
        write_c0_gtoffset(start_count - read_c0_count());
        local_irq_restore(flags);

        /* restore guest CP0_Cause, as TI may already be set */
        back_to_back_c0_hazard();
        write_gc0_cause(cause);

        /*
         * The above sequence isn't atomic and would result in lost timer
         * interrupts if we're not careful. Detect if a timer interrupt is due
         * and assert it.
         */
        back_to_back_c0_hazard();
        after_count = read_gc0_count();
        if (after_count - start_count > compare - start_count - 1)
                kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 cause, compare;

        compare = kvm_read_sw_gc0_compare(cop0);
        cause = kvm_read_sw_gc0_cause(cop0);

        write_gc0_compare(compare);
        _kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
        u32 gctl0;

        gctl0 = read_c0_guestctl0();
        if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
                /* enable guest access to hard timer */
                write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

                _kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
                                       read_gc0_cause());
        }
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
                                u32 *out_compare, u32 *out_cause)
{
        u32 cause, compare, before_count, end_count;
        ktime_t before_time;

        compare = read_gc0_compare();
        *out_compare = compare;

        before_time = ktime_get();

        /*
         * Record a CP0_Count *before* reading CP0_Cause, so that any timer
         * interrupt becoming pending in between is detected below.
         */
        before_count = read_gc0_count();
        back_to_back_c0_hazard();
        cause = read_gc0_cause();
        *out_cause = cause;

        /*
         * Record a final CP0_Count which we will transfer to the soft-timer.
         * This is recorded *after* saving CP0_Cause, so we don't get any timer
         * interrupts from the guest timer interfering with the soft-timer.
         */
        back_to_back_c0_hazard();
        end_count = read_gc0_count();

        /*
         * The above sequence isn't atomic, so we could miss a timer interrupt
         * between reading CP0_Cause and end_count. Detect and queue any timer
         * interrupt due between before_count and end_count.
         */
        if (end_count - before_count > compare - before_count - 1)
                kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

        /*
         * Restore soft-timer, ignoring a small amount of negative drift due to
         * delay between freeze_hrtimer and setting CP0_GTOffset.
         */
        kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was
 * in use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 gctl0, compare, cause;

        gctl0 = read_c0_guestctl0();
        if (gctl0 & MIPS_GCTL0_GT) {
                /* disable guest use of hard timer */
                write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

                /* save hard timer state */
                _kvm_vz_save_htimer(vcpu, &compare, &cause);
        } else {
                compare = read_gc0_compare();
                cause = read_gc0_cause();
        }

        /* save timer-related state to VCPU context */
        kvm_write_sw_gc0_cause(cop0, cause);
        kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
        u32 gctl0, compare, cause;

        preempt_disable();
        gctl0 = read_c0_guestctl0();
        if (gctl0 & MIPS_GCTL0_GT) {
                /* disable guest use of timer */
                write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

                /* save hard timer state */
                _kvm_vz_save_htimer(vcpu, &compare, &cause);

                /* restore soft timer state */
                _kvm_vz_restore_stimer(vcpu, compare, cause);
        }
        preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
        if (inst.spec3_format.opcode != spec3_op)
                return false;

        switch (inst.spec3_format.func) {
        case lwle_op:
        case lwre_op:
        case cachee_op:
        case sbe_op:
        case she_op:
        case sce_op:
        case swe_op:
        case swle_op:
        case swre_op:
        case prefe_op:
        case lbue_op:
        case lhue_op:
        case lbe_op:
        case lhe_op:
        case lle_op:
        case lwe_op:
                return true;
        default:
                return false;
        }
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
        u32 am_lookup;
        int err;

        /*
         * Interpret access control mode. We assume address errors will already
         * have been caught by the guest, leaving us with:
         *      AM      UM  SM  KM  31..24 23..16
         * UK    0 000          Unm   0      0
         * MK    1 001          TLB   1
         * MSK   2 010      TLB TLB   1
         * MUSK  3 011  TLB TLB TLB   1
         * MUSUK 4 100  TLB TLB Unm   0      1
         * USK   5 101      Unm Unm   0      0
         * -     6 110                0      0
         * UUSK  7 111  Unm Unm Unm   0      0
         *
         * We shift a magic value by AM across the sign bit to find if always
         * TLB mapped, and if not shift by 8 again to find if it depends on KM.
         */
        am_lookup = 0x70080000 << am;
        if ((s32)am_lookup < 0) {
                /*
                 * MK, MSK, MUSK
                 * Always TLB mapped, unless SegCtl.EU && ERL
                 */
                if (!eu || !(read_gc0_status() & ST0_ERL))
                        return true;
        } else {
                am_lookup <<= 8;
                if ((s32)am_lookup < 0) {
                        union mips_instruction inst;
                        unsigned int status;
                        u32 *opc;

                        /*
                         * MUSUK
                         * TLB mapped if not in kernel mode
                         */
                        status = read_gc0_status();
                        if (!(status & (ST0_EXL | ST0_ERL)) &&
                            (status & ST0_KSU))
                                return true;
                        /*
                         * EVA access instructions in kernel
                         * mode access user address space.
                         */
                        opc = (u32 *)vcpu->arch.pc;
                        if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
                                opc += 1;
                        err = kvm_get_badinstr(opc, vcpu, &inst.word);
                        if (!err && is_eva_access(inst))
                                return true;
                }
        }

        return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                             unsigned long *gpa)
{
        u32 gva32 = gva;
        unsigned long segctl;

        if ((long)gva == (s32)gva32) {
                /* Handle canonical 32-bit virtual address */
                if (cpu_guest_has_segments) {
                        unsigned long mask, pa;

                        switch (gva32 >> 29) {
                        case 0:
                        case 1: /* CFG5 (1GB) */
                                segctl = read_gc0_segctl2() >> 16;
                                mask = (unsigned long)0xfc0000000ull;
                                break;
                        case 2:
                        case 3: /* CFG4 (1GB) */
                                segctl = read_gc0_segctl2();
                                mask = (unsigned long)0xfc0000000ull;
                                break;
                        case 4: /* CFG3 (512MB) */
                                segctl = read_gc0_segctl1() >> 16;
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 5: /* CFG2 (512MB) */
                                segctl = read_gc0_segctl1();
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 6: /* CFG1 (512MB) */
                                segctl = read_gc0_segctl0() >> 16;
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 7: /* CFG0 (512MB) */
                                segctl = read_gc0_segctl0();
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        default:
                                /*
                                 * GCC 4.9 isn't smart enough to figure out
                                 * that segctl and mask are always initialised.
                                 */
                                unreachable();
                        }

                        if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
                                             segctl & 0x0008))
                                goto tlb_mapped;

                        /* Unmapped, find guest physical address */
                        pa = (segctl << 20) & mask;
                        pa |= gva32 & ~mask;
                        *gpa = pa;
                        return 0;
                } else if ((s32)gva32 < (s32)0xc0000000) {
                        /* legacy unmapped KSeg0 or KSeg1 */
                        *gpa = gva32 & 0x1fffffff;
                        return 0;
                }
#ifdef CONFIG_64BIT
        } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
                /* XKPHYS */
                if (cpu_guest_has_segments) {
                        /*
                         * Each of the 8 regions can be overridden by SegCtl2.XR
                         * to use SegCtl1.XAM.
                         */
                        segctl = read_gc0_segctl2();
                        if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
                                segctl = read_gc0_segctl1();
                                if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
                                                     0))
                                        goto tlb_mapped;
                        }

                }

                /*
                 * Traditionally fully unmapped.
                 * Bits 61:59 specify the CCA, which we can just mask off here.
                 * Bits 58:PABITS should be zero, but we shouldn't have got here
                 * if it wasn't.
                 */
                *gpa = gva & 0x07ffffffffffffff;
                return 0;
#endif
        }

tlb_mapped:
        return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
                                  unsigned long *gpa)
{
        unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
                                 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

        /* If BadVAddr is GPA, then all is well in the world */
        if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
                *gpa = badvaddr;
                return 0;
        }

        /* Otherwise we'd expect it to be GVA ... */
        if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
                 "Unexpected gexccode %#x\n", gexccode))
                return -EINVAL;

        /* ... and we need to perform the GVA->GPA translation in software */
        return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
        u32 *opc = (u32 *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                read_gc0_status());
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
        /* Mask off ones we don't want */
        unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

        if (read_gc0_pagegrain() & PG_ELPA)
                mask |= 0x00ffffff00000000ull;
        if (cpu_guest_has_mvh)
                mask |= MIPS_MAAR_VH;

        /* Set or clear VH */
        if (op == mtc_op) {
                /* clear VH */
                val &= ~MIPS_MAAR_VH;
        } else if (op == dmtc_op) {
                /* set VH to match VL */
                val &= ~MIPS_MAAR_VH;
                if (val & MIPS_MAAR_VL)
                        val |= MIPS_MAAR_VH;
        }

        return val & mask;
}

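/*
 * Writing MIPS_MAARI_INDEX (all ones) selects the highest implemented MAAR
 * pair, matching the architectural behaviour of the MAARI register; other
 * out-of-range indices are simply ignored.
 */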
static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        val &= MIPS_MAARI_INDEX;
        if (val == MIPS_MAARI_INDEX)
                kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
        else if (val < ARRAY_SIZE(vcpu->arch.maar))
                kvm_write_sw_gc0_maari(cop0, val);
}

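/*
 * Emulate a guest CP0 access that raised a GPSI (Guest Privileged Sensitive
 * Instruction) exception: decode the MFC0/DMFC0/MTC0/DMTC0/WAIT encoding and
 * apply it to the guest CP0 context, advancing the guest PC on success.
 */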
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
        unsigned long val;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                default:
                        er = EMULATE_FAIL;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case dmfc_op:
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
                                val = kvm_mips_read_count(vcpu);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                val = read_gc0_compare();
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 0) {          /* LLAddr */
                                if (cpu_guest_has_rw_llb)
                                        val = read_gc0_lladdr() &
                                                MIPS_LLADDR_LLB;
                                else
                                        val = 0;
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 1 &&          /* MAAR */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                /* MAARI must be in range */
                                BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
                                       ARRAY_SIZE(vcpu->arch.maar));
                                val = vcpu->arch.maar[
                                        kvm_read_sw_gc0_maari(cop0)];
                        } else if ((rd == MIPS_CP0_PRID &&
                                    (sel == 0 ||        /* PRid */
                                     sel == 2 ||        /* CDMMBase */
                                     sel == 3)) ||      /* CMGCRBase */
                                   (rd == MIPS_CP0_STATUS &&
                                    (sel == 2 ||        /* SRSCtl */
                                     sel == 3)) ||      /* SRSMap */
                                   (rd == MIPS_CP0_CONFIG &&
                                    (sel == 6 ||        /* Config6 */
                                     sel == 7)) ||      /* Config7 */
                                   (rd == MIPS_CP0_LLADDR &&
                                    (sel == 2) &&       /* MAARI */
                                    cpu_guest_has_maar &&
                                    !cpu_guest_has_dyn_maar) ||
                                   (rd == MIPS_CP0_ERRCTL &&
                                    (sel == 0))) {      /* ErrCtl */
                                val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
                        } else if (rd == MIPS_CP0_DIAG &&
                                   (sel == 0)) {        /* Diag */
                                val = cop0->reg[rd][sel];
#endif
                        } else {
                                val = 0;
                                er = EMULATE_FAIL;
                        }

                        if (er != EMULATE_FAIL) {
                                /* Sign extend */
                                if (inst.c0r_format.rs == mfc_op)
                                        val = (int)val;
                                vcpu->arch.gprs[rt] = val;
                        }

                        trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
                                        KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel), val);
                        break;

                case dmtc_op:
                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        val = vcpu->arch.gprs[rt];
                        trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
                                        KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel), val);

                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
                                kvm_vz_lose_htimer(vcpu);
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 0) {          /* LLAddr */
                                /*
                                 * P5600 generates GPSI on guest MTC0 LLAddr.
                                 * Only allow the guest to clear LLB.
                                 */
                                if (cpu_guest_has_rw_llb &&
                                    !(val & MIPS_LLADDR_LLB))
                                        write_gc0_lladdr(0);
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 1 &&          /* MAAR */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                val = mips_process_maar(inst.c0r_format.rs,
                                                        val);

                                /* MAARI must be in range */
                                BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
                                       ARRAY_SIZE(vcpu->arch.maar));
                                vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
                                                                        val;
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   (sel == 2) &&        /* MAARI */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                kvm_write_maari(vcpu, val);
                        } else if (rd == MIPS_CP0_CONFIG &&
                                   (sel == 6)) {
                                cop0->reg[rd][sel] = (int)val;
                        } else if (rd == MIPS_CP0_ERRCTL &&
                                   (sel == 0)) {        /* ErrCtl */
                                /* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
                        } else if (rd == MIPS_CP0_DIAG &&
                                   (sel == 0)) {        /* Diag */
                                unsigned long flags;

                                local_irq_save(flags);
                                if (val & LOONGSON_DIAG_BTB) {
                                        /* Flush BTB */
                                        set_c0_diag(LOONGSON_DIAG_BTB);
                                }
                                if (val & LOONGSON_DIAG_ITLB) {
                                        /* Flush ITLB */
                                        set_c0_diag(LOONGSON_DIAG_ITLB);
                                }
                                if (val & LOONGSON_DIAG_DTLB) {
                                        /* Flush DTLB */
                                        set_c0_diag(LOONGSON_DIAG_DTLB);
                                }
                                if (val & LOONGSON_DIAG_VTLB) {
                                        /* Flush VTLB */
                                        kvm_loongson_clear_guest_vtlb();
                                }
                                if (val & LOONGSON_DIAG_FTLB) {
                                        /* Flush FTLB */
                                        kvm_loongson_clear_guest_ftlb();
                                }
                                local_irq_restore(flags);
#endif
                        } else {
                                er = EMULATE_FAIL;
                        }
                        break;

                default:
                        er = EMULATE_FAIL;
                        break;
                }
        }

        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL) {
                kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
                        curr_pc, __func__, inst.word);

                vcpu->arch.pc = curr_pc;
        }

        return er;
}

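/*
 * Emulate a guest CACHE instruction that trapped with a GPSI exception by
 * performing the equivalent operation on the root caches. Only primary
 * I-cache and D-cache index/hit ops are handled; anything else fails hard.
 */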
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
                                               u32 *opc, u32 cause,
                                               struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        u32 cache, op_inst, op, base;
        s16 offset;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long va, curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        base = inst.i_format.rs;
        op_inst = inst.i_format.rt;
        if (cpu_has_mips_r6)
                offset = inst.spec3_format.simmediate;
        else
                offset = inst.i_format.simmediate;
        cache = op_inst & CacheOp_Cache;
        op = op_inst & CacheOp_Op;

        va = arch->gprs[base] + offset;

        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                  cache, op, base, arch->gprs[base], offset);

        /* Secondary or tertiary cache ops ignored */
        if (cache != Cache_I && cache != Cache_D)
                return EMULATE_DONE;

        switch (op_inst) {
        case Index_Invalidate_I:
                flush_icache_line_indexed(va);
                return EMULATE_DONE;
        case Index_Writeback_Inv_D:
                flush_dcache_line_indexed(va);
                return EMULATE_DONE;
        case Hit_Invalidate_I:
        case Hit_Invalidate_D:
        case Hit_Writeback_Inv_D:
                if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
                        /* We can just flush entire icache */
                        local_flush_icache_range(0, 0);
                        return EMULATE_DONE;
                }

                /* So far, other platforms support guest hit cache ops */
                break;
        default:
                break;
        }

        kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
                offset);
        /* Rollback PC */
        vcpu->arch.pc = curr_pc;

        return EMULATE_FAIL;
}

#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_vcpu *vcpu)
{
        unsigned int rs, rd;
        unsigned int hostcfg;
        unsigned long curr_pc;
        enum emulation_result er = EMULATE_DONE;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        rs = inst.loongson3_lscsr_format.rs;
        rd = inst.loongson3_lscsr_format.rd;
        switch (inst.loongson3_lscsr_format.fr) {
        case 0x8:  /* Read CPUCFG */
                ++vcpu->stat.vz_cpucfg_exits;
                hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

                switch (vcpu->arch.gprs[rs]) {
                case LOONGSON_CFG0:
                        vcpu->arch.gprs[rd] = 0x14c000;
                        break;
                case LOONGSON_CFG1:
                        hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
                                    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
                                    LOONGSON_CFG1_SFBP);
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                case LOONGSON_CFG2:
                        hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
                                    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                case LOONGSON_CFG3:
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                default:
                        /* Don't export any other advanced features to guest */
                        vcpu->arch.gprs[rd] = 0;
                        break;
                }
                break;

        default:
                kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
                        inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs],
                        curr_pc);
                er = EMULATE_FAIL;
                break;
        }

        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL) {
                kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
                        curr_pc, __func__, inst.word,
                        inst.loongson3_lscsr_format.fr);

                vcpu->arch.pc = curr_pc;
        }

        return er;
}
#endif

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
                                                     struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        union mips_instruction inst;
        int rd, rt, sel;
        int err;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err)
                return EMULATE_FAIL;

        switch (inst.r_format.opcode) {
        case cop0_op:
                er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
                break;
#ifndef CONFIG_CPU_MIPSR6
        case cache_op:
                trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
                er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
                break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
        case lwc2_op:
                er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
                break;
#endif
        case spec3_op:
                switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
                case cache6_op:
                        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
                        er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
                        break;
#endif
                case rdhwr_op:
                        if (inst.r_format.rs || (inst.r_format.re >> 3))
                                goto unknown;

                        rd = inst.r_format.rd;
                        rt = inst.r_format.rt;
                        sel = inst.r_format.re & 0x7;

                        switch (rd) {
                        case MIPS_HWR_CC:       /* Read count register */
                                arch->gprs[rt] =
                                        (long)(int)kvm_mips_read_count(vcpu);
                                break;
                        default:
                                trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
                                              KVM_TRACE_HWR(rd, sel), 0);
                                goto unknown;
                        }

                        trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
                                      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

                        er = update_pc(vcpu, cause);
                        break;
                default:
                        goto unknown;
                }
                break;
unknown:

        default:
                kvm_err("GPSI exception not supported (%p/%#x)\n",
                        opc, inst.word);
                kvm_arch_vcpu_dump_regs(vcpu);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
                                                     struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        union mips_instruction inst;
        int err;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err)
                return EMULATE_FAIL;

        /* complete MTC0 on behalf of guest and advance EPC */
        if (inst.c0r_format.opcode == cop0_op &&
            inst.c0r_format.rs == mtc_op &&
            inst.c0r_format.z == 0) {
                int rt = inst.c0r_format.rt;
                int rd = inst.c0r_format.rd;
                int sel = inst.c0r_format.sel;
                unsigned int val = arch->gprs[rt];
                unsigned int old_val, change;

                trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
                              val);

                if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                        /* FR bit should read as zero if no FPU */
                        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                val &= ~(ST0_CU1 | ST0_FR);

                        /*
                         * Also don't allow FR to be set if host doesn't support
                         * it.
                         */
                        if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
                                val &= ~ST0_FR;

                        old_val = read_gc0_status();
                        change = val ^ old_val;

                        if (change & ST0_FR) {
                                /*
                                 * FPU and Vector register state is made
                                 * UNPREDICTABLE by a change of FR, so don't
                                 * even bother saving it.
                                 */
                                kvm_drop_fpu(vcpu);
                        }

                        /*
                         * If MSA state is already live, it is undefined how it
                         * interacts with FR=0 FPU state, and we don't want to
                         * hit reserved instruction exceptions trying to save
                         * the MSA state later when CU=1 && FR=1, so play it
                         * safe and save it first.
                         */
                        if (change & ST0_CU1 && !(val & ST0_FR) &&
                            vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                kvm_lose_fpu(vcpu);

                        write_gc0_status(val);
                } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                        u32 old_cause = read_gc0_cause();
                        u32 change = old_cause ^ val;

                        /* DC bit enabling/disabling timer? */
                        if (change & CAUSEF_DC) {
                                if (val & CAUSEF_DC) {
                                        kvm_vz_lose_htimer(vcpu);
                                        kvm_mips_count_disable_cause(vcpu);
                                } else {
                                        kvm_mips_count_enable_cause(vcpu);
                                }
                        }

                        /* Only certain bits are RW to the guest */
                        change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
                                   CAUSEF_IP0 | CAUSEF_IP1);

                        /* WP can only be cleared */
                        change &= ~CAUSEF_WP | old_cause;

                        write_gc0_cause(old_cause ^ change);
                } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
                        write_gc0_intctl(val);
                } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
                        old_val = read_gc0_config5();
                        change = val ^ old_val;

                        /* Handle changes in FPU/MSA modes */
                        preempt_disable();

                        /*
                         * Propagate FRE changes immediately if the FPU
                         * context is already loaded.
                         */
                        if (change & MIPS_CONF5_FRE &&
                            vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                change_c0_config5(MIPS_CONF5_FRE, val);

                        preempt_enable();

                        val = old_val ^
                                (change & kvm_vz_config5_guest_wrmask(vcpu));
                        write_gc0_config5(val);
                } else {
                        kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
                                opc, inst.word);
                        er = EMULATE_FAIL;
                }

                if (er != EMULATE_FAIL)
                        er = update_pc(vcpu, cause);
        } else {
                kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
                        opc, inst.word);
                er = EMULATE_FAIL;
        }

        return er;
}

static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
                                                     struct kvm_vcpu *vcpu)
{
        /*
         * Presumably this is due to MC (guest mode change), so lets trace some
         * relevant info.
         */
        trace_kvm_guest_mode_change(vcpu);

        return EMULATE_DONE;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
                                                   struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        unsigned long curr_pc;
        int err;

        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err)
                return EMULATE_FAIL;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        er = kvm_mips_emul_hypcall(vcpu, inst);
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

        return er;
}

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
                                                               u32 cause,
                                                               u32 *opc,
                                                               struct kvm_vcpu *vcpu)
{
        u32 inst;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
                gexccode, opc, inst, read_gc0_status());

        return EMULATE_FAIL;
}

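/*
 * Top-level guest exit handler: decode GuestCtl0.GExcCode and dispatch to the
 * GPSI/GSFC/HC/GHFC emulators above, bumping the matching exit statistic.
 */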
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
        u32 *opc = (u32 *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
                        MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
        int ret = RESUME_GUEST;

        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
        switch (gexccode) {
        case MIPS_GCTL0_GEXC_GPSI:
                ++vcpu->stat.vz_gpsi_exits;
                er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_GSFC:
                ++vcpu->stat.vz_gsfc_exits;
                er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_HC:
                ++vcpu->stat.vz_hc_exits;
                er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_GRR:
                ++vcpu->stat.vz_grr_exits;
                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
                                                       vcpu);
                break;
        case MIPS_GCTL0_GEXC_GVA:
                ++vcpu->stat.vz_gva_exits;
                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
                                                       vcpu);
                break;
        case MIPS_GCTL0_GEXC_GHFC:
                ++vcpu->stat.vz_ghfc_exits;
                er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
                break;
        case MIPS_GCTL0_GEXC_GPA:
                ++vcpu->stat.vz_gpa_exits;
                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
                                                       vcpu);
                break;
        default:
                ++vcpu->stat.vz_resvd_exits;
                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
                                                       vcpu);
                break;
        }

        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else if (er == EMULATE_HYPERCALL) {
                ret = kvm_mips_handle_hypcall(vcpu);
        } else {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_FAIL;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /*
                 * If guest FPU not present, the FPU operation should have been
                 * treated as a reserved instruction!
                 * If FPU already in use, we shouldn't get this at all.
                 */
                if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                            vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
                        preempt_enable();
                        return EMULATE_FAIL;
                }

                kvm_own_fpu(vcpu);
                er = EMULATE_DONE;
        }
        /* other coprocessors not handled */

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        /*
         * If MSA not present or not exposed to guest or FR=0, the MSA
         * operation should have been treated as a reserved instruction!
         * Same if CU1=1, FR=0.
         * If MSA already in use, we shouldn't get this at all.
         */
        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
            !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
            vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        kvm_own_msa(vcpu);

        return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 *opc = (u32 *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
        union mips_instruction inst;
        enum emulation_result er = EMULATE_DONE;
        int err, ret = RESUME_GUEST;

        if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
                /* A code fetch fault doesn't count as an MMIO */
                if (kvm_is_ifetch_fault(&vcpu->arch)) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /* Fetch the instruction */
                if (cause & CAUSEF_BD)
                        opc += 1;
                err = kvm_get_badinstr(opc, vcpu, &inst.word);
                if (err) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /* Treat as MMIO */
                er = kvm_mips_emulate_load(inst, cause, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
                                opc, badvaddr);
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                }
        }

        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else if (er == EMULATE_DO_MMIO) {
                run->exit_reason = KVM_EXIT_MMIO;
                ret = RESUME_HOST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 *opc = (u32 *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
        union mips_instruction inst;
        enum emulation_result er = EMULATE_DONE;
        int err;
        int ret = RESUME_GUEST;

        /* Just try the access again if we couldn't do the translation */
        if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
                return RESUME_GUEST;
        vcpu->arch.host_cp0_badvaddr = badvaddr;

        if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
                /* Fetch the instruction */
                if (cause & CAUSEF_BD)
                        opc += 1;
                err = kvm_get_badinstr(opc, vcpu, &inst.word);
                if (err) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /* Treat as MMIO */
                er = kvm_mips_emulate_store(inst, cause, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
                                opc, badvaddr);
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                }
        }

        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else if (er == EMULATE_DO_MMIO) {
                run->exit_reason = KVM_EXIT_MMIO;
                ret = RESUME_HOST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static u64 kvm_vz_get_one_regs[] = {
        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_ENTRYLO0,
        KVM_REG_MIPS_CP0_ENTRYLO1,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_PAGEGRAIN,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_INTCTL,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_PRID,
        KVM_REG_MIPS_CP0_EBASE,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG4,
        KVM_REG_MIPS_CP0_CONFIG5,
        KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
        KVM_REG_MIPS_CP0_XCONTEXT,
#endif
        KVM_REG_MIPS_CP0_ERROREPC,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
        KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
        KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
        KVM_REG_MIPS_CP0_SEGCTL0,
        KVM_REG_MIPS_CP0_SEGCTL1,
        KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
        KVM_REG_MIPS_CP0_PWBASE,
        KVM_REG_MIPS_CP0_PWFIELD,
        KVM_REG_MIPS_CP0_PWSIZE,
        KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
        KVM_REG_MIPS_CP0_KSCRATCH3,
        KVM_REG_MIPS_CP0_KSCRATCH4,
        KVM_REG_MIPS_CP0_KSCRATCH5,
        KVM_REG_MIPS_CP0_KSCRATCH6,
};

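/*
 * The count returned here must stay in sync with what
 * kvm_vz_copy_reg_indices() actually emits: one entry per optional feature
 * register on top of the base kvm_vz_get_one_regs[] list.
 */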
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
        unsigned long ret;

        ret = ARRAY_SIZE(kvm_vz_get_one_regs);
        if (cpu_guest_has_userlocal)
                ++ret;
        if (cpu_guest_has_badinstr)
                ++ret;
        if (cpu_guest_has_badinstrp)
                ++ret;
        if (cpu_guest_has_contextconfig)
                ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
        if (cpu_guest_has_segments)
                ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
        if (cpu_guest_has_htw || cpu_guest_has_ldpte)
                ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
        if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
                ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
        ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

        return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
        u64 index;
        unsigned int i;

        if (copy_to_user(indices, kvm_vz_get_one_regs,
                         sizeof(kvm_vz_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_vz_get_one_regs);

        if (cpu_guest_has_userlocal) {
                index = KVM_REG_MIPS_CP0_USERLOCAL;
                if (copy_to_user(indices, &index, sizeof(index)))
                        return -EFAULT;
                ++indices;
        }
        if (cpu_guest_has_badinstr) {
                index = KVM_REG_MIPS_CP0_BADINSTR;
                if (copy_to_user(indices, &index, sizeof(index)))
                        return -EFAULT;
                ++indices;
        }
        if (cpu_guest_has_badinstrp) {
                index = KVM_REG_MIPS_CP0_BADINSTRP;
                if (copy_to_user(indices, &index, sizeof(index)))
                        return -EFAULT;
                ++indices;
        }
        if (cpu_guest_has_contextconfig) {
                if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
                                 sizeof(kvm_vz_get_one_regs_contextconfig)))
                        return -EFAULT;
                indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
        }
        if (cpu_guest_has_segments) {
                if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
                                 sizeof(kvm_vz_get_one_regs_segments)))
                        return -EFAULT;
                indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
        }
        if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
                if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
                                 sizeof(kvm_vz_get_one_regs_htw)))
                        return -EFAULT;
                indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
        }
        if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
                        index = KVM_REG_MIPS_CP0_MAAR(i);
                        if (copy_to_user(indices, &index, sizeof(index)))
                                return -EFAULT;
                        ++indices;
                }

                index = KVM_REG_MIPS_CP0_MAARI;
                if (copy_to_user(indices, &index, sizeof(index)))
                        return -EFAULT;
                ++indices;
        }
        for (i = 0; i < 6; ++i) {
                if (!cpu_guest_has_kscr(i + 2))
                        continue;

                if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
                                 sizeof(kvm_vz_get_one_regs_kscratch[i])))
                        return -EFAULT;
                ++indices;
        }

        return 0;
}

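/*
 * On 32-bit hosts EntryLo is only 32 bits wide, but the KVM ONE_REG ABI
 * always exposes a 64-bit view with RI/XI in bits 63:62, so the helpers below
 * move those two bits between the hardware position and the ABI position.
 */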
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
        s64 mask, ret = v;

        if (BITS_PER_LONG == 32) {
                /*
                 * KVM API exposes 64-bit version of the register, so move the
                 * RI/XI bits up into place.
                 */
                mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
                ret &= ~mask;
                ret |= ((s64)v & mask) << 32;
        }
        return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
        unsigned long mask, ret = v;

        if (BITS_PER_LONG == 32) {
                /*
                 * KVM API exposes 64-bit version of the register, so move the
                 * RI/XI bits down into place.
                 */
                mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
                ret &= ~mask;
                ret |= (v >> 32) & mask;
        }
        return ret;
}

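/*
 * ONE_REG accessors: reads are served from the live guest CP0 context where
 * the register is hardware-backed, falling back to the software-maintained
 * copy in vcpu->arch.cop0 for registers KVM emulates itself (e.g. PRid on
 * most cores, Config6, MAAR/MAARI).
 */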
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
                              const struct kvm_one_reg *reg,
                              s64 *v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned int idx;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                *v = (long)read_gc0_index();
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                *v = entrylo_kvm_to_user(read_gc0_entrylo0());
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                *v = entrylo_kvm_to_user(read_gc0_entrylo1());
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)read_gc0_context();
                break;
        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
                if (!cpu_guest_has_contextconfig)
                        return -EINVAL;
                *v = read_gc0_contextconfig();
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                if (!cpu_guest_has_userlocal)
                        return -EINVAL;
                *v = read_gc0_userlocal();
                break;
#ifdef CONFIG_64BIT
        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
                if (!cpu_guest_has_contextconfig)
                        return -EINVAL;
                *v = read_gc0_xcontextconfig();
                break;
#endif
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)read_gc0_pagemask();
                break;
        case KVM_REG_MIPS_CP0_PAGEGRAIN:
                *v = (long)read_gc0_pagegrain();
                break;
        case KVM_REG_MIPS_CP0_SEGCTL0:
                if (!cpu_guest_has_segments)
                        return -EINVAL;
                *v = read_gc0_segctl0();
                break;
        case KVM_REG_MIPS_CP0_SEGCTL1:
                if (!cpu_guest_has_segments)
                        return -EINVAL;
                *v = read_gc0_segctl1();
                break;
        case KVM_REG_MIPS_CP0_SEGCTL2:
                if (!cpu_guest_has_segments)
                        return -EINVAL;
                *v = read_gc0_segctl2();
                break;
        case KVM_REG_MIPS_CP0_PWBASE:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                *v = read_gc0_pwbase();
                break;
        case KVM_REG_MIPS_CP0_PWFIELD:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                *v = read_gc0_pwfield();
                break;
        case KVM_REG_MIPS_CP0_PWSIZE:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                *v = read_gc0_pwsize();
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)read_gc0_wired();
                break;
        case KVM_REG_MIPS_CP0_PWCTL:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                *v = read_gc0_pwctl();
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)read_gc0_hwrena();
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                *v = (long)read_gc0_badvaddr();
                break;
        case KVM_REG_MIPS_CP0_BADINSTR:
                if (!cpu_guest_has_badinstr)
                        return -EINVAL;
                *v = read_gc0_badinstr();
                break;
        case KVM_REG_MIPS_CP0_BADINSTRP:
                if (!cpu_guest_has_badinstrp)
                        return -EINVAL;
                *v = read_gc0_badinstrp();
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                *v = (long)read_gc0_entryhi();
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                *v = (long)read_gc0_compare();
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                *v = (long)read_gc0_status();
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                *v = read_gc0_intctl();
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                *v = (long)read_gc0_cause();
                break;
        case KVM_REG_MIPS_CP0_EPC:
                *v = (long)read_gc0_epc();
                break;
        case KVM_REG_MIPS_CP0_PRID:
                switch (boot_cpu_type()) {
                case CPU_CAVIUM_OCTEON3:
                        /* Octeon III has a read-only guest.PRid */
                        *v = read_gc0_prid();
                        break;
                default:
                        *v = (long)kvm_read_c0_guest_prid(cop0);
                        break;
                }
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                *v = kvm_vz_read_gc0_ebase();
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                *v = read_gc0_config();
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                if (!cpu_guest_has_conf1)
                        return -EINVAL;
                *v = read_gc0_config1();
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                if (!cpu_guest_has_conf2)
                        return -EINVAL;
                *v = read_gc0_config2();
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                if (!cpu_guest_has_conf3)
                        return -EINVAL;
                *v = read_gc0_config3();
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                if (!cpu_guest_has_conf4)
                        return -EINVAL;
                *v = read_gc0_config4();
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                if (!cpu_guest_has_conf5)
                        return -EINVAL;
                *v = read_gc0_config5();
                break;
        case KVM_REG_MIPS_CP0_CONFIG6:
                *v = kvm_read_sw_gc0_config6(cop0);
                break;
        case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
                if (idx >= ARRAY_SIZE(vcpu->arch.maar))
                        return -EINVAL;
                *v = vcpu->arch.maar[idx];
                break;
        case KVM_REG_MIPS_CP0_MAARI:
                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
                        return -EINVAL;
                *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
                break;
#ifdef CONFIG_64BIT
        case KVM_REG_MIPS_CP0_XCONTEXT:
                *v = read_gc0_xcontext();
                break;
#endif
        case KVM_REG_MIPS_CP0_ERROREPC:
                *v = (long)read_gc0_errorepc();
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
                idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
                if (!cpu_guest_has_kscr(idx))
                        return -EINVAL;
                switch (idx) {
                case 2:
                        *v = (long)read_gc0_kscratch1();
                        break;
                case 3:
                        *v = (long)read_gc0_kscratch2();
                        break;
                case 4:
                        *v = (long)read_gc0_kscratch3();
                        break;
                case 5:
                        *v = (long)read_gc0_kscratch4();
                        break;
                case 6:
                        *v = (long)read_gc0_kscratch5();
                        break;
                case 7:
                        *v = (long)read_gc0_kscratch6();
                        break;
                }
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
                              const struct kvm_one_reg *reg,
                              s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned int idx;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                write_gc0_index(v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                write_gc0_entrylo0(entrylo_user_to_kvm(v));
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                write_gc0_entrylo1(entrylo_user_to_kvm(v));
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                write_gc0_context(v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
                if (!cpu_guest_has_contextconfig)
                        return -EINVAL;
                write_gc0_contextconfig(v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                if (!cpu_guest_has_userlocal)
                        return -EINVAL;
                write_gc0_userlocal(v);
                break;
#ifdef CONFIG_64BIT
        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
                if (!cpu_guest_has_contextconfig)
                        return -EINVAL;
                write_gc0_xcontextconfig(v);
                break;
#endif
        case KVM_REG_MIPS_CP0_PAGEMASK:
                write_gc0_pagemask(v);
                break;
        case KVM_REG_MIPS_CP0_PAGEGRAIN:
                write_gc0_pagegrain(v);
                break;
        case KVM_REG_MIPS_CP0_SEGCTL0:
                if (!cpu_guest_has_segments)
                        return -EINVAL;
                write_gc0_segctl0(v);
                break;
        case KVM_REG_MIPS_CP0_SEGCTL1:
                if (!cpu_guest_has_segments)
                        return -EINVAL;
                write_gc0_segctl1(v);
                break;
        case KVM_REG_MIPS_CP0_SEGCTL2:
                if (!cpu_guest_has_segments)
                        return -EINVAL;
                write_gc0_segctl2(v);
                break;
        case KVM_REG_MIPS_CP0_PWBASE:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                write_gc0_pwbase(v);
                break;
        case KVM_REG_MIPS_CP0_PWFIELD:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                write_gc0_pwfield(v);
                break;
        case KVM_REG_MIPS_CP0_PWSIZE:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                write_gc0_pwsize(v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                change_gc0_wired(MIPSR6_WIRED_WIRED, v);
                break;
        case KVM_REG_MIPS_CP0_PWCTL:
                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
                        return -EINVAL;
                write_gc0_pwctl(v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                write_gc0_hwrena(v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                write_gc0_badvaddr(v);
                break;
        case KVM_REG_MIPS_CP0_BADINSTR:
                if (!cpu_guest_has_badinstr)
                        return -EINVAL;
                write_gc0_badinstr(v);
                break;
        case KVM_REG_MIPS_CP0_BADINSTRP:
                if (!cpu_guest_has_badinstrp)
                        return -EINVAL;
                write_gc0_badinstrp(v);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                write_gc0_entryhi(v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                write_gc0_status(v);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                write_gc0_intctl(v);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the timer interrupt pending bit (TI).
                 * A timer interrupt should not happen in between.
                 */
                if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                change_gc0_cause((u32)~CAUSEF_DC, v);
                        } else {
                                /* enable timer last */
                                change_gc0_cause((u32)~CAUSEF_DC, v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        write_gc0_cause(v);
                }
                break;
        case KVM_REG_MIPS_CP0_EPC:
                write_gc0_epc(v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                switch (boot_cpu_type()) {
                case CPU_CAVIUM_OCTEON3:
                        /* Octeon III has a guest.PRid, but its read-only */
                        break;
                default:
                        kvm_write_c0_guest_prid(cop0, v);
                        break;
                }
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                kvm_vz_write_gc0_ebase(v);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                cur = read_gc0_config();
                change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        write_gc0_config(v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                if (!cpu_guest_has_conf1)
                        break;
                cur = read_gc0_config1();
                change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        write_gc0_config1(v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                if (!cpu_guest_has_conf2)
                        break;
                cur = read_gc0_config2();
                change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        write_gc0_config2(v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                if (!cpu_guest_has_conf3)
                        break;
                cur = read_gc0_config3();
                change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        write_gc0_config3(v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                if (!cpu_guest_has_conf4)
                        break;
                cur = read_gc0_config4();
                change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        write_gc0_config4(v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                if (!cpu_guest_has_conf5)
                        break;
                cur = read_gc0_config5();
                change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        write_gc0_config5(v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG6:
                cur = kvm_read_sw_gc0_config6(cop0);
                change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_sw_gc0_config6(cop0, (int)v);
                }
                break;
        case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
                        return -EINVAL;
                idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
                if (idx >= ARRAY_SIZE(vcpu->arch.maar))
                        return -EINVAL;
                vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
                break;
        case KVM_REG_MIPS_CP0_MAARI:
                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
                        return -EINVAL;
                kvm_write_maari(vcpu, v);
                break;
#ifdef CONFIG_64BIT
        case KVM_REG_MIPS_CP0_XCONTEXT:
                write_gc0_xcontext(v);
                break;
#endif
        case KVM_REG_MIPS_CP0_ERROREPC:
                write_gc0_errorepc(v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
                idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
                if (!cpu_guest_has_kscr(idx))
                        return -EINVAL;
                switch (idx) {
                case 2:
                        write_gc0_kscratch1(v);
                        break;
                case 3:
                        write_gc0_kscratch2(v);
                        break;
                case 4:
                        write_gc0_kscratch3(v);
                        break;
                case 5:
                        write_gc0_kscratch4(v);
                        break;
                case 6:
                        write_gc0_kscratch5(v);
                        break;
                case 7:
                        write_gc0_kscratch6(v);
                        break;
                }
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

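/*
 * GuestIDs are allocated per-CPU from a rolling counter; the bits above
 * GUESTID_MASK act as a version, so bumping the version invalidates every
 * previously allocated GuestID on that CPU without touching each VCPU.
 */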
#define guestid_cache(cpu)      (cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
        unsigned long guestid = guestid_cache(cpu);

        if (!(++guestid & GUESTID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                if (!guestid)           /* fix version if needed */
                        guestid = GUESTID_FIRST_VERSION;

                ++guestid;              /* guestid 0 reserved for root */

                /* start new guestid cycle */
                kvm_vz_local_flush_roottlb_all_guests();
                kvm_vz_local_flush_guesttlb_all();
        }

        guestid_cache(cpu) = guestid;
}

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
        int ret = 0;
        int i;

        if (!kvm_request_pending(vcpu))
                return 0;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                if (cpu_has_guestid) {
                        /* Drop all GuestIDs for this VCPU */
                        for_each_possible_cpu(i)
                                vcpu->arch.vzguestid[i] = 0;
                        /* This will clobber guest TLB contents too */
                        ret = 1;
                }
                /*
                 * For Root ASID Dealias (RAD) we don't do anything here, but we
                 * still need the request to ensure we recheck asid_flush_mask.
                 * We can still return 0 as only the root TLB will be affected
                 * by a root ASID flush.
                 */
        }

        return ret;
}

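/*
 * Wired guest TLB entries are preserved across context switches: they are
 * saved into vcpu->arch.wired_tlb here and reloaded by
 * kvm_vz_vcpu_load_wired() when the VCPU is next entered.
 */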
static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
        unsigned int wired = read_gc0_wired();
        struct kvm_mips_tlb *tlbs;
        int i;

        /* Expand the wired TLB array if necessary */
        wired &= MIPSR6_WIRED_WIRED;
        if (wired > vcpu->arch.wired_tlb_limit) {
                tlbs = krealloc(vcpu->arch.wired_tlb, wired *
                                sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
                if (WARN_ON(!tlbs)) {
                        /* Save whatever we can */
                        wired = vcpu->arch.wired_tlb_limit;
                } else {
                        vcpu->arch.wired_tlb = tlbs;
                        vcpu->arch.wired_tlb_limit = wired;
                }
        }

        if (wired)
                /* Save wired entries from the guest TLB */
                kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);

        /* Invalidate any dropped entries since last time */
        for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
                vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
                vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
                vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
                vcpu->arch.wired_tlb[i].tlb_mask = 0;
        }
        vcpu->arch.wired_tlb_used = wired;
}

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
        /* Load wired entries into the guest TLB */
        if (vcpu->arch.wired_tlb)
                kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
                                     vcpu->arch.wired_tlb_used);
}

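/*
 * Refresh the guest TLB / GuestID state when (re)entering guest context on
 * this CPU: with GuestIDs we allocate a fresh ID whenever ours is stale,
 * otherwise the whole guest TLB must be flushed on any VCPU or CPU change.
 */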
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
        bool migrated;

        /*
         * Are we entering guest context on a different CPU to last time?
         * If so, the VCPU's guest TLB state on this CPU may be stale.
         */
        migrated = (vcpu->arch.last_exec_cpu != cpu);
        vcpu->arch.last_exec_cpu = cpu;

        /*
         * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
         * remains set until another vcpu is loaded in. As a rule GuestRID
         * remains zeroed when in root context unless the kernel is busy
         * manipulating guest tlb entries.
         */
        if (cpu_has_guestid) {
                /*
                 * Check if our GuestID is of an older version and thus
                 * invalid.
                 *
                 * We also discard the stored GuestID if we've executed on
                 * another CPU, as the guest mappings may have changed without
                 * hypervisor knowledge.
                 */
                if (migrated ||
                    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
                                                GUESTID_VERSION_MASK) {
                        kvm_vz_get_new_guestid(cpu, vcpu);
                        vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
                        trace_kvm_guestid_change(vcpu,
                                                 vcpu->arch.vzguestid[cpu]);
                }

                /* Restore GuestID */
                change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
        } else {
                /*
                 * The Guest TLB only stores a single guest's TLB state, so
                 * flush it if another VCPU has executed on this CPU.
                 *
                 * We also flush if we've executed on another CPU, as the guest
                 * mappings may have changed without hypervisor knowledge.
                 */
                if (migrated || last_exec_vcpu[cpu] != vcpu)
                        kvm_vz_local_flush_guesttlb_all();
                last_exec_vcpu[cpu] = vcpu;

                /*
                 * Root ASID dealiases guest GPA mappings in the root TLB.
                 * Allocate new root ASID if needed.
                 */
                if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
                        get_new_mmu_context(gpa_mm);
                else
                        check_mmu_context(gpa_mm);
        }
}
2557
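/*
 * Scheduled in: restore wired state and the timer unconditionally, and the
 * full guest CP0 context only when it may have been clobbered.
 */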
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over
	 * time if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next VCPU from succeeding by matching a LL on
	 * the previous VCPU.
	 */
	if (vcpu->kvm->created_vcpus > 1)
		write_gc0_lladdr(0);

	return 0;
}

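/*
 * Scheduled out: save the guest CP0 context back into the soft copy so it can
 * be restored on the next vcpu_load.
 */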
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}

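/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */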
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}

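/*
 * Enable virtualization on the current CPU: partition or resize the TLB
 * between root and guest, and configure GuestCtl0/1/2.
 */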
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size, giving the other half to the guest */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term wired
		 * entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written,
		 * check it matches or we won't provide a consistent view to the
		 * guest. If this ever happens it suggests an asymmetric number
		 * of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * certain features:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext) {
		if (current_cpu_type() != CPU_LOONGSON64)
			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
		else
			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

#ifdef CONFIG_CPU_LOONGSON64
	/* Control guest CCA attribute */
	if (cpu_has_csr())
		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif

	return 0;
}

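/*
 * Disable virtualization on the current CPU, returning any TLB resources
 * lent to the guest back to the root.
 */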
static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though as
		 * they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

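/* Start each VCPU with no valid GuestID on any CPU. */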
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}

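/*
 * Initialize the soft CP0 state of a new VCPU to architectural reset state,
 * deriving its capabilities from the host's guest context.
 */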
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
{
	if (!cpu_has_guestid) {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will kick any
		 * running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
	}
}

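/*
 * Called when re-entering the guest without returning to userspace, e.g.
 * after handling requests: reload TLB state, preserving the wired entries
 * around the reload if the guest TLB may have been clobbered.
 */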
static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}

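/*
 * Top-level run entry: deliver pending interrupts, load guest TLB state and
 * enter the guest until the next exit to userspace.
 */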
static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

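/* VZ implementation of the KVM MIPS callback interface. */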
static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}