/*
 * KVM Microsoft Hyper-V emulation: SynIC, synthetic timers, reference TSC
 * page, Hyper-V MSRs and hypercalls.
 */
21#include "x86.h"
22#include "lapic.h"
23#include "ioapic.h"
24#include "cpuid.h"
25#include "hyperv.h"
26#include "xen.h"
27
28#include <linux/cpu.h>
29#include <linux/kvm_host.h>
30#include <linux/highmem.h>
31#include <linux/sched/cputime.h>
32#include <linux/eventfd.h>
33
34#include <asm/apicdef.h>
35#include <trace/events/kvm.h>
36
37#include "trace.h"
38#include "irq.h"
39#include "fpu.h"
40
41
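/* The Microsoft Hyper-V interface signature: "Hv#1" as a little-endian EAX value. */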
42#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
43
44#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
45
46static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
47 bool vcpu_kick);
48
49static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
50{
51 return atomic64_read(&synic->sint[sint]);
52}
53
54static inline int synic_get_sint_vector(u64 sint_value)
55{
56 if (sint_value & HV_SYNIC_SINT_MASKED)
57 return -1;
58 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
59}
60
61static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
62 int vector)
63{
64 int i;
65
66 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
67 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
68 return true;
69 }
70 return false;
71}
72
73static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
74 int vector)
75{
76 int i;
77 u64 sint_value;
78
79 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
80 sint_value = synic_read_sint(synic, i);
81 if (synic_get_sint_vector(sint_value) == vector &&
82 sint_value & HV_SYNIC_SINT_AUTO_EOI)
83 return true;
84 }
85 return false;
86}
87
88static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
89 int vector)
90{
91 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
92 return;
93
94 if (synic_has_vector_connected(synic, vector))
95 __set_bit(vector, synic->vec_bitmap);
96 else
97 __clear_bit(vector, synic->vec_bitmap);
98
99 if (synic_has_vector_auto_eoi(synic, vector))
100 __set_bit(vector, synic->auto_eoi_bitmap);
101 else
102 __clear_bit(vector, synic->auto_eoi_bitmap);
103}
104
105static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
106 u64 data, bool host)
107{
108 int vector, old_vector;
109 bool masked;
110
111 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
112 masked = data & HV_SYNIC_SINT_MASKED;
113
	/*
	 * Valid SINT vectors are 16-255.  Refuse anything below that from the
	 * guest unless the SINT is being masked; the host may write any value
	 * so the register can be reset/zero-initialized.
	 */
119 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
120 return 1;

	/*
	 * Guest may configure multiple SINTs to use the same vector, so the
	 * per-vector bitmaps (vec_bitmap, auto_eoi_bitmap) have to be
	 * refreshed for both the previously programmed vector and the new one.
	 */
127 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
128
129 atomic64_set(&synic->sint[sint], data);
130
131 synic_update_vector(synic, old_vector);
132
133 synic_update_vector(synic, vector);
134
	/* Load SynIC vectors into EOI exit bitmap */
136 kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
137 return 0;
138}
139
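/*
 * Look a vCPU up by its Hyper-V VP index.  The common case is that the VP
 * index equals the vCPU index, which is tried first; otherwise fall back to
 * a linear scan of all vCPUs.
 */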
140static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
141{
142 struct kvm_vcpu *vcpu = NULL;
143 int i;
144
145 if (vpidx >= KVM_MAX_VCPUS)
146 return NULL;
147
148 vcpu = kvm_get_vcpu(kvm, vpidx);
149 if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
150 return vcpu;
151 kvm_for_each_vcpu(i, vcpu, kvm)
152 if (kvm_hv_get_vpindex(vcpu) == vpidx)
153 return vcpu;
154 return NULL;
155}
156
157static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
158{
159 struct kvm_vcpu *vcpu;
160 struct kvm_vcpu_hv_synic *synic;
161
162 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
163 if (!vcpu || !to_hv_vcpu(vcpu))
164 return NULL;
165 synic = to_hv_synic(vcpu);
166 return (synic->active) ? synic : NULL;
167}
168
169static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
170{
171 struct kvm *kvm = vcpu->kvm;
172 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
173 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
174 struct kvm_vcpu_hv_stimer *stimer;
175 int gsi, idx;
176
177 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
178
	/* Try to deliver pending Hyper-V SynIC timer messages */
180 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
181 stimer = &hv_vcpu->stimer[idx];
182 if (stimer->msg_pending && stimer->config.enable &&
183 !stimer->config.direct_mode &&
184 stimer->config.sintx == sint)
185 stimer_mark_pending(stimer, false);
186 }
187
188 idx = srcu_read_lock(&kvm->irq_srcu);
189 gsi = atomic_read(&synic->sint_to_gsi[sint]);
190 if (gsi != -1)
191 kvm_notify_acked_gsi(kvm, gsi);
192 srcu_read_unlock(&kvm->irq_srcu, idx);
193}
194
195static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
196{
197 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
198 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
199
200 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
201 hv_vcpu->exit.u.synic.msr = msr;
202 hv_vcpu->exit.u.synic.control = synic->control;
203 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
204 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
205
206 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
207}
208
209static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
210 u32 msr, u64 data, bool host)
211{
212 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
213 int ret;
214
215 if (!synic->active && !host)
216 return 1;
217
218 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
219
220 ret = 0;
221 switch (msr) {
222 case HV_X64_MSR_SCONTROL:
223 synic->control = data;
224 if (!host)
225 synic_exit(synic, msr);
226 break;
227 case HV_X64_MSR_SVERSION:
228 if (!host) {
229 ret = 1;
230 break;
231 }
232 synic->version = data;
233 break;
234 case HV_X64_MSR_SIEFP:
235 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
236 !synic->dont_zero_synic_pages)
237 if (kvm_clear_guest(vcpu->kvm,
238 data & PAGE_MASK, PAGE_SIZE)) {
239 ret = 1;
240 break;
241 }
242 synic->evt_page = data;
243 if (!host)
244 synic_exit(synic, msr);
245 break;
246 case HV_X64_MSR_SIMP:
247 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
248 !synic->dont_zero_synic_pages)
249 if (kvm_clear_guest(vcpu->kvm,
250 data & PAGE_MASK, PAGE_SIZE)) {
251 ret = 1;
252 break;
253 }
254 synic->msg_page = data;
255 if (!host)
256 synic_exit(synic, msr);
257 break;
258 case HV_X64_MSR_EOM: {
259 int i;
260
261 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
262 kvm_hv_notify_acked_sint(vcpu, i);
263 break;
264 }
265 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
266 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
267 break;
268 default:
269 ret = 1;
270 break;
271 }
272 return ret;
273}
274
275static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
276{
277 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
278
279 return hv_vcpu->cpuid_cache.syndbg_cap_eax &
280 HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
281}
282
283static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
284{
285 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
286
287 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
288 hv->hv_syndbg.control.status =
289 vcpu->run->hyperv.u.syndbg.status;
290 return 1;
291}
292
293static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
294{
295 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
296 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
297
298 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
299 hv_vcpu->exit.u.syndbg.msr = msr;
300 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
301 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
302 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
303 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
304 vcpu->arch.complete_userspace_io =
305 kvm_hv_syndbg_complete_userspace;
306
307 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
308}
309
310static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
311{
312 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
313
314 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
315 return 1;
316
317 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
318 to_hv_vcpu(vcpu)->vp_index, msr, data);
319 switch (msr) {
320 case HV_X64_MSR_SYNDBG_CONTROL:
321 syndbg->control.control = data;
322 if (!host)
323 syndbg_exit(vcpu, msr);
324 break;
325 case HV_X64_MSR_SYNDBG_STATUS:
326 syndbg->control.status = data;
327 break;
328 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
329 syndbg->control.send_page = data;
330 break;
331 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
332 syndbg->control.recv_page = data;
333 break;
334 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
335 syndbg->control.pending_page = data;
336 if (!host)
337 syndbg_exit(vcpu, msr);
338 break;
339 case HV_X64_MSR_SYNDBG_OPTIONS:
340 syndbg->options = data;
341 break;
342 default:
343 break;
344 }
345
346 return 0;
347}
348
349static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
350{
351 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
352
353 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
354 return 1;
355
356 switch (msr) {
357 case HV_X64_MSR_SYNDBG_CONTROL:
358 *pdata = syndbg->control.control;
359 break;
360 case HV_X64_MSR_SYNDBG_STATUS:
361 *pdata = syndbg->control.status;
362 break;
363 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
364 *pdata = syndbg->control.send_page;
365 break;
366 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
367 *pdata = syndbg->control.recv_page;
368 break;
369 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
370 *pdata = syndbg->control.pending_page;
371 break;
372 case HV_X64_MSR_SYNDBG_OPTIONS:
373 *pdata = syndbg->options;
374 break;
375 default:
376 break;
377 }
378
379 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
380
381 return 0;
382}
383
384static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
385 bool host)
386{
387 int ret;
388
389 if (!synic->active && !host)
390 return 1;
391
392 ret = 0;
393 switch (msr) {
394 case HV_X64_MSR_SCONTROL:
395 *pdata = synic->control;
396 break;
397 case HV_X64_MSR_SVERSION:
398 *pdata = synic->version;
399 break;
400 case HV_X64_MSR_SIEFP:
401 *pdata = synic->evt_page;
402 break;
403 case HV_X64_MSR_SIMP:
404 *pdata = synic->msg_page;
405 break;
406 case HV_X64_MSR_EOM:
407 *pdata = 0;
408 break;
409 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
410 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
411 break;
412 default:
413 ret = 1;
414 break;
415 }
416 return ret;
417}
418
419static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
420{
421 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
422 struct kvm_lapic_irq irq;
423 int ret, vector;
424
425 if (sint >= ARRAY_SIZE(synic->sint))
426 return -EINVAL;
427
428 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
429 if (vector < 0)
430 return -ENOENT;
431
432 memset(&irq, 0, sizeof(irq));
433 irq.shorthand = APIC_DEST_SELF;
434 irq.dest_mode = APIC_DEST_PHYSICAL;
435 irq.delivery_mode = APIC_DM_FIXED;
436 irq.vector = vector;
437 irq.level = 1;
438
439 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
440 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
441 return ret;
442}
443
444int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
445{
446 struct kvm_vcpu_hv_synic *synic;
447
448 synic = synic_get(kvm, vpidx);
449 if (!synic)
450 return -EINVAL;
451
452 return synic_set_irq(synic, sint);
453}
454
455void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
456{
457 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
458 int i;
459
460 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
461
462 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
463 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
464 kvm_hv_notify_acked_sint(vcpu, i);
465}
466
467static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
468{
469 struct kvm_vcpu_hv_synic *synic;
470
471 synic = synic_get(kvm, vpidx);
472 if (!synic)
473 return -EINVAL;
474
475 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
476 return -EINVAL;
477
478 atomic_set(&synic->sint_to_gsi[sint], gsi);
479 return 0;
480}
481
482void kvm_hv_irq_routing_update(struct kvm *kvm)
483{
484 struct kvm_irq_routing_table *irq_rt;
485 struct kvm_kernel_irq_routing_entry *e;
486 u32 gsi;
487
488 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
489 lockdep_is_held(&kvm->irq_lock));
490
491 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
492 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
493 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
494 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
495 e->hv_sint.sint, gsi);
496 }
497 }
498}
499
500static void synic_init(struct kvm_vcpu_hv_synic *synic)
501{
502 int i;
503
504 memset(synic, 0, sizeof(*synic));
505 synic->version = HV_SYNIC_VERSION_1;
506 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
507 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
508 atomic_set(&synic->sint_to_gsi[i], -1);
509 }
510}
511
512static u64 get_time_ref_counter(struct kvm *kvm)
513{
514 struct kvm_hv *hv = to_kvm_hv(kvm);
515 struct kvm_vcpu *vcpu;
516 u64 tsc;
517
	/*
	 * The guest has not set up the TSC page or the clock isn't
	 * stable, fall back to get_kvmclock_ns.
	 */
522 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
523 return div_u64(get_kvmclock_ns(kvm), 100);
524
525 vcpu = kvm_get_vcpu(kvm, 0);
526 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
527 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
528 + hv->tsc_ref.tsc_offset;
529}
530
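/*
 * Mark a synthetic timer as pending and request KVM_REQ_HV_STIMER so it gets
 * handled by kvm_hv_process_stimers() on the next vCPU entry; optionally kick
 * the vCPU out of guest mode so that happens promptly.
 */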
531static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
532 bool vcpu_kick)
533{
534 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
535
536 set_bit(stimer->index,
537 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
538 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
539 if (vcpu_kick)
540 kvm_vcpu_kick(vcpu);
541}
542
543static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
544{
545 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
546
547 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
548 stimer->index);
549
550 hrtimer_cancel(&stimer->timer);
551 clear_bit(stimer->index,
552 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
553 stimer->msg_pending = false;
554 stimer->exp_time = 0;
555}
556
557static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
558{
559 struct kvm_vcpu_hv_stimer *stimer;
560
561 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
562 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
563 stimer->index);
564 stimer_mark_pending(stimer, true);
565
566 return HRTIMER_NORESTART;
567}
568
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config.enable is set
 */
574static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
575{
576 u64 time_now;
577 ktime_t ktime_now;
578
579 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
580 ktime_now = ktime_get();
581
582 if (stimer->config.periodic) {
583 if (stimer->exp_time) {
584 if (time_now >= stimer->exp_time) {
585 u64 remainder;
586
587 div64_u64_rem(time_now - stimer->exp_time,
588 stimer->count, &remainder);
589 stimer->exp_time =
590 time_now + (stimer->count - remainder);
591 }
592 } else
593 stimer->exp_time = time_now + stimer->count;
594
595 trace_kvm_hv_stimer_start_periodic(
596 hv_stimer_to_vcpu(stimer)->vcpu_id,
597 stimer->index,
598 time_now, stimer->exp_time);
599
600 hrtimer_start(&stimer->timer,
601 ktime_add_ns(ktime_now,
602 100 * (stimer->exp_time - time_now)),
603 HRTIMER_MODE_ABS);
604 return 0;
605 }
606 stimer->exp_time = stimer->count;
607 if (time_now >= stimer->count) {
		/*
		 * Per the Hyper-V TLFS, if a one-shot timer is enabled and the
		 * specified count is already in the past, it expires
		 * immediately, so mark it pending right away instead of
		 * arming the hrtimer.
		 */
614 stimer_mark_pending(stimer, false);
615 return 0;
616 }
617
618 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
619 stimer->index,
620 time_now, stimer->count);
621
622 hrtimer_start(&stimer->timer,
623 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
624 HRTIMER_MODE_ABS);
625 return 0;
626}
627
628static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
629 bool host)
630{
631 union hv_stimer_config new_config = {.as_uint64 = config},
632 old_config = {.as_uint64 = stimer->config.as_uint64};
633 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
634 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
635 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
636
637 if (!synic->active && !host)
638 return 1;
639
640 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
641 !(hv_vcpu->cpuid_cache.features_edx &
642 HV_STIMER_DIRECT_MODE_AVAILABLE)))
643 return 1;
644
645 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
646 stimer->index, config, host);
647
648 stimer_cleanup(stimer);
649 if (old_config.enable &&
650 !new_config.direct_mode && new_config.sintx == 0)
651 new_config.enable = 0;
652 stimer->config.as_uint64 = new_config.as_uint64;
653
654 if (stimer->config.enable)
655 stimer_mark_pending(stimer, false);
656
657 return 0;
658}
659
660static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
661 bool host)
662{
663 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
664 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
665
666 if (!synic->active && !host)
667 return 1;
668
669 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
670 stimer->index, count, host);
671
672 stimer_cleanup(stimer);
673 stimer->count = count;
674 if (stimer->count == 0)
675 stimer->config.enable = 0;
676 else if (stimer->config.auto_enable)
677 stimer->config.enable = 1;
678
679 if (stimer->config.enable)
680 stimer_mark_pending(stimer, false);
681
682 return 0;
683}
684
685static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
686{
687 *pconfig = stimer->config.as_uint64;
688 return 0;
689}
690
691static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
692{
693 *pcount = stimer->count;
694 return 0;
695}
696
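/*
 * Deliver a message to the guest's SynIC message page for the given SINT:
 * if the slot is still occupied (message_type != HVMSG_NONE), set the
 * msg_pending flag and return -EAGAIN so the caller retries after the guest
 * EOMs; otherwise copy the message in and raise the SINT interrupt.
 */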
697static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
698 struct hv_message *src_msg, bool no_retry)
699{
700 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
701 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
702 gfn_t msg_page_gfn;
703 struct hv_message_header hv_hdr;
704 int r;
705
706 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
707 return -ENOENT;
708
709 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
710
	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type.  However, this function
	 * is only called in vcpu context so the entire update is atomic from
	 * the guest's point of view and the exact order does not matter here.
	 */
717 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
718 msg_off + offsetof(struct hv_message,
719 header.message_type),
720 sizeof(hv_hdr.message_type));
721 if (r < 0)
722 return r;
723
724 if (hv_hdr.message_type != HVMSG_NONE) {
725 if (no_retry)
726 return 0;
727
728 hv_hdr.message_flags.msg_pending = 1;
729 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
730 &hv_hdr.message_flags,
731 msg_off +
732 offsetof(struct hv_message,
733 header.message_flags),
734 sizeof(hv_hdr.message_flags));
735 if (r < 0)
736 return r;
737 return -EAGAIN;
738 }
739
740 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
741 sizeof(src_msg->header) +
742 src_msg->header.payload_size);
743 if (r < 0)
744 return r;
745
746 r = synic_set_irq(synic, sint);
747 if (r < 0)
748 return r;
749 if (r == 0)
750 return -EFAULT;
751 return 0;
752}
753
754static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
755{
756 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
757 struct hv_message *msg = &stimer->msg;
758 struct hv_timer_message_payload *payload =
759 (struct hv_timer_message_payload *)&msg->u.payload;
760
	/*
	 * To avoid piling up periodic ticks, don't retry message delivery
	 * for periodic timers (within the "lazy" lost ticks policy).
	 */
765 bool no_retry = stimer->config.periodic;
766
767 payload->expiration_time = stimer->exp_time;
768 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
769 return synic_deliver_msg(to_hv_synic(vcpu),
770 stimer->config.sintx, msg,
771 no_retry);
772}
773
774static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
775{
776 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
777 struct kvm_lapic_irq irq = {
778 .delivery_mode = APIC_DM_FIXED,
779 .vector = stimer->config.apic_vector
780 };
781
782 if (lapic_in_kernel(vcpu))
783 return !kvm_apic_set_irq(vcpu, &irq, NULL);
784 return 0;
785}
786
787static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
788{
789 int r, direct = stimer->config.direct_mode;
790
791 stimer->msg_pending = true;
792 if (!direct)
793 r = stimer_send_msg(stimer);
794 else
795 r = stimer_notify_direct(stimer);
796 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
797 stimer->index, direct, r);
798 if (!r) {
799 stimer->msg_pending = false;
800 if (!(stimer->config.periodic))
801 stimer->config.enable = 0;
802 }
803}
804
805void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
806{
807 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
808 struct kvm_vcpu_hv_stimer *stimer;
809 u64 time_now, exp_time;
810 int i;
811
812 if (!hv_vcpu)
813 return;
814
815 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
816 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
817 stimer = &hv_vcpu->stimer[i];
818 if (stimer->config.enable) {
819 exp_time = stimer->exp_time;
820
821 if (exp_time) {
822 time_now =
823 get_time_ref_counter(vcpu->kvm);
824 if (time_now >= exp_time)
825 stimer_expiration(stimer);
826 }
827
828 if ((stimer->config.enable) &&
829 stimer->count) {
830 if (!stimer->msg_pending)
831 stimer_start(stimer);
832 } else
833 stimer_cleanup(stimer);
834 }
835 }
836}
837
838void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
839{
840 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
841 int i;
842
843 if (!hv_vcpu)
844 return;
845
846 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
847 stimer_cleanup(&hv_vcpu->stimer[i]);
848
849 kfree(hv_vcpu);
850 vcpu->arch.hyperv = NULL;
851}
852
853bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
854{
855 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
856
857 if (!hv_vcpu)
858 return false;
859
860 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
861 return false;
862 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
863}
864EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
865
866bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
867 struct hv_vp_assist_page *assist_page)
868{
869 if (!kvm_hv_assist_page_enabled(vcpu))
870 return false;
871 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
872 assist_page, sizeof(*assist_page));
873}
874EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
875
876static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
877{
878 struct hv_message *msg = &stimer->msg;
879 struct hv_timer_message_payload *payload =
880 (struct hv_timer_message_payload *)&msg->u.payload;
881
882 memset(&msg->header, 0, sizeof(msg->header));
883 msg->header.message_type = HVMSG_TIMER_EXPIRED;
884 msg->header.payload_size = sizeof(*payload);
885
886 payload->timer_index = stimer->index;
887 payload->expiration_time = 0;
888 payload->delivery_time = 0;
889}
890
891static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
892{
893 memset(stimer, 0, sizeof(*stimer));
894 stimer->index = timer_index;
895 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
896 stimer->timer.function = stimer_timer_callback;
897 stimer_prepare_msg(stimer);
898}
899
900static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
901{
902 struct kvm_vcpu_hv *hv_vcpu;
903 int i;
904
905 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
906 if (!hv_vcpu)
907 return -ENOMEM;
908
909 vcpu->arch.hyperv = hv_vcpu;
910 hv_vcpu->vcpu = vcpu;
911
912 synic_init(&hv_vcpu->synic);
913
914 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
915 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
916 stimer_init(&hv_vcpu->stimer[i], i);
917
918 hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
919
920 return 0;
921}
922
923int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
924{
925 struct kvm_vcpu_hv_synic *synic;
926 int r;
927
928 if (!to_hv_vcpu(vcpu)) {
929 r = kvm_hv_vcpu_init(vcpu);
930 if (r)
931 return r;
932 }
933
934 synic = to_hv_synic(vcpu);
935
	/*
	 * Hyper-V SynIC auto EOI SINTs are not compatible with APICv, so
	 * request APICv to be deactivated while SynIC is in use.
	 */
941 kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV);
942 synic->active = true;
943 synic->dont_zero_synic_pages = dont_zero_synic_pages;
944 synic->control = HV_SYNIC_CONTROL_ENABLE;
945 return 0;
946}
947
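/*
 * Partition-wide MSRs are stored in struct kvm_hv and their accessors run
 * under hv->hv_lock; everything else is per-vCPU state in struct kvm_vcpu_hv.
 */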
948static bool kvm_hv_msr_partition_wide(u32 msr)
949{
950 bool r = false;
951
952 switch (msr) {
953 case HV_X64_MSR_GUEST_OS_ID:
954 case HV_X64_MSR_HYPERCALL:
955 case HV_X64_MSR_REFERENCE_TSC:
956 case HV_X64_MSR_TIME_REF_COUNT:
957 case HV_X64_MSR_CRASH_CTL:
958 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
959 case HV_X64_MSR_RESET:
960 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
961 case HV_X64_MSR_TSC_EMULATION_CONTROL:
962 case HV_X64_MSR_TSC_EMULATION_STATUS:
963 case HV_X64_MSR_SYNDBG_OPTIONS:
964 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
965 r = true;
966 break;
967 }
968
969 return r;
970}
971
972static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
973{
974 struct kvm_hv *hv = to_kvm_hv(kvm);
975 size_t size = ARRAY_SIZE(hv->hv_crash_param);
976
977 if (WARN_ON_ONCE(index >= size))
978 return -EINVAL;
979
980 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
981 return 0;
982}
983
984static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
985{
986 struct kvm_hv *hv = to_kvm_hv(kvm);
987
988 *pdata = hv->hv_crash_ctl;
989 return 0;
990}
991
992static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
993{
994 struct kvm_hv *hv = to_kvm_hv(kvm);
995
996 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
997
998 return 0;
999}
1000
1001static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1002{
1003 struct kvm_hv *hv = to_kvm_hv(kvm);
1004 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1005
1006 if (WARN_ON_ONCE(index >= size))
1007 return -EINVAL;
1008
1009 hv->hv_crash_param[array_index_nospec(index, size)] = data;
1010 return 0;
1011}
1012
/*
 * The kvmclock and Hyper-V reference TSC page use similar formulas, and
 * converting between them is possible.
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, the Hyper-V formula can be derived
 * from the kvmclock formula by setting:
 *
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *    offset = 0
 *
 * With a non-zero kvmclock reference point, equating the two formulas
 * additionally yields:
 *
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These are exactly the values computed by compute_tsc_page_parameters()
 * below.
 */
1048static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1049 struct ms_hyperv_tsc_page *tsc_ref)
1050{
1051 u64 max_mul;
1052
1053 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1054 return false;
1055
	/*
	 * Check if the scale would overflow 64 bits; if so, the time ref
	 * counter MSR must be used instead:
	 *    tsc_to_system_mul * 2^(32+tsc_shift) / 100 >= 2^64
	 *    <=> tsc_to_system_mul >= 100 * 2^(32-tsc_shift) = max_mul
	 */
1062 max_mul = 100ull << (32 - hv_clock->tsc_shift);
1063 if (hv_clock->tsc_to_system_mul >= max_mul)
1064 return false;
1065
	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
1070 tsc_ref->tsc_scale =
1071 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1072 hv_clock->tsc_to_system_mul,
1073 100);
1074
1075 tsc_ref->tsc_offset = hv_clock->system_time;
1076 do_div(tsc_ref->tsc_offset, 100);
1077 tsc_ref->tsc_offset -=
1078 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
1079 return true;
1080}
1081
/*
 * Don't touch the TSC page values if the guest has opted for TSC emulation
 * after migration.  KVM doesn't fully support reenlightenment notifications
 * and TSC access emulation, and Hyper-V expects the values in the TSC page to
 * stay constant before TSC access emulation is disabled from the guest side
 * (HV_X64_MSR_TSC_EMULATION_STATUS).  Userspace is expected to preserve the
 * TSC frequency and guest-visible TSC value across migration.
 */
1091static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
1092{
1093 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
1094 hv->hv_tsc_emulation_control;
1095}
1096
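/*
 * Populate the guest's reference TSC page.  The update follows a seqlock-like
 * protocol: the sequence is first zeroed (the guest falls back to the time
 * reference count MSR), the scale/offset parameters are written, and only
 * then is a new non-zero sequence published, with write barriers enforcing
 * the ordering.
 */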
1097void kvm_hv_setup_tsc_page(struct kvm *kvm,
1098 struct pvclock_vcpu_time_info *hv_clock)
1099{
1100 struct kvm_hv *hv = to_kvm_hv(kvm);
1101 u32 tsc_seq;
1102 u64 gfn;
1103
1104 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1105 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1106
1107 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1108 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
1109 return;
1110
1111 mutex_lock(&hv->hv_lock);
1112 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1113 goto out_unlock;
1114
1115 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
1120 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1121 &tsc_seq, sizeof(tsc_seq))))
1122 goto out_err;
1123
1124 if (tsc_seq && tsc_page_update_unsafe(hv)) {
1125 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1126 goto out_err;
1127
1128 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1129 goto out_unlock;
1130 }
1131
	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
1136 hv->tsc_ref.tsc_sequence = 0;
1137 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1138 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1139 goto out_err;
1140
1141 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1142 goto out_err;
1143
	/* Ensure the sequence is zero before writing the rest of the struct. */
1145 smp_wmb();
1146 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1147 goto out_err;
1148
	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
1152 tsc_seq++;
1153 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1154 tsc_seq = 1;
1155
	/* Write the struct entirely before the non-zero sequence. */
1157 smp_wmb();
1158
1159 hv->tsc_ref.tsc_sequence = tsc_seq;
1160 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1161 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1162 goto out_err;
1163
1164 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1165 goto out_unlock;
1166
1167out_err:
1168 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1169out_unlock:
1170 mutex_unlock(&hv->hv_lock);
1171}
1172
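/*
 * Force guests off the reference TSC page (e.g. before its contents become
 * stale) by zeroing the in-page sequence; kvm_hv_setup_tsc_page() re-enables
 * it with fresh parameters.
 */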
1173void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
1174{
1175 struct kvm_hv *hv = to_kvm_hv(kvm);
1176 u64 gfn;
1177 int idx;
1178
1179 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1180 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
1181 tsc_page_update_unsafe(hv))
1182 return;
1183
1184 mutex_lock(&hv->hv_lock);
1185
1186 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1187 goto out_unlock;
1188
	/* Preserve the HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states. */
1190 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
1191 hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
1192
1193 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1194
1195 hv->tsc_ref.tsc_sequence = 0;
1196
	/*
	 * Take the srcu lock as memslots will be accessed to check the gfn
	 * cache generation against the memslots generation.
	 */
1201 idx = srcu_read_lock(&kvm->srcu);
1202 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1203 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1204 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1205 srcu_read_unlock(&kvm->srcu, idx);
1206
1207out_unlock:
1208 mutex_unlock(&hv->hv_lock);
1209}
1210
1211
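/*
 * When the guest has opted in to CPUID enforcement (enforce_cpuid), Hyper-V
 * MSR accesses are only allowed if the corresponding feature bit was exposed
 * in the Hyper-V CPUID leaves; host-initiated accesses bypass this check at
 * the call sites.
 */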
1212static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1213{
1214 if (!hv_vcpu->enforce_cpuid)
1215 return true;
1216
1217 switch (msr) {
1218 case HV_X64_MSR_GUEST_OS_ID:
1219 case HV_X64_MSR_HYPERCALL:
1220 return hv_vcpu->cpuid_cache.features_eax &
1221 HV_MSR_HYPERCALL_AVAILABLE;
1222 case HV_X64_MSR_VP_RUNTIME:
1223 return hv_vcpu->cpuid_cache.features_eax &
1224 HV_MSR_VP_RUNTIME_AVAILABLE;
1225 case HV_X64_MSR_TIME_REF_COUNT:
1226 return hv_vcpu->cpuid_cache.features_eax &
1227 HV_MSR_TIME_REF_COUNT_AVAILABLE;
1228 case HV_X64_MSR_VP_INDEX:
1229 return hv_vcpu->cpuid_cache.features_eax &
1230 HV_MSR_VP_INDEX_AVAILABLE;
1231 case HV_X64_MSR_RESET:
1232 return hv_vcpu->cpuid_cache.features_eax &
1233 HV_MSR_RESET_AVAILABLE;
1234 case HV_X64_MSR_REFERENCE_TSC:
1235 return hv_vcpu->cpuid_cache.features_eax &
1236 HV_MSR_REFERENCE_TSC_AVAILABLE;
1237 case HV_X64_MSR_SCONTROL:
1238 case HV_X64_MSR_SVERSION:
1239 case HV_X64_MSR_SIEFP:
1240 case HV_X64_MSR_SIMP:
1241 case HV_X64_MSR_EOM:
1242 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1243 return hv_vcpu->cpuid_cache.features_eax &
1244 HV_MSR_SYNIC_AVAILABLE;
1245 case HV_X64_MSR_STIMER0_CONFIG:
1246 case HV_X64_MSR_STIMER1_CONFIG:
1247 case HV_X64_MSR_STIMER2_CONFIG:
1248 case HV_X64_MSR_STIMER3_CONFIG:
1249 case HV_X64_MSR_STIMER0_COUNT:
1250 case HV_X64_MSR_STIMER1_COUNT:
1251 case HV_X64_MSR_STIMER2_COUNT:
1252 case HV_X64_MSR_STIMER3_COUNT:
1253 return hv_vcpu->cpuid_cache.features_eax &
1254 HV_MSR_SYNTIMER_AVAILABLE;
1255 case HV_X64_MSR_EOI:
1256 case HV_X64_MSR_ICR:
1257 case HV_X64_MSR_TPR:
1258 case HV_X64_MSR_VP_ASSIST_PAGE:
1259 return hv_vcpu->cpuid_cache.features_eax &
1260 HV_MSR_APIC_ACCESS_AVAILABLE;
1261 break;
1262 case HV_X64_MSR_TSC_FREQUENCY:
1263 case HV_X64_MSR_APIC_FREQUENCY:
1264 return hv_vcpu->cpuid_cache.features_eax &
1265 HV_ACCESS_FREQUENCY_MSRS;
1266 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1267 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1268 case HV_X64_MSR_TSC_EMULATION_STATUS:
1269 return hv_vcpu->cpuid_cache.features_eax &
1270 HV_ACCESS_REENLIGHTENMENT;
1271 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1272 case HV_X64_MSR_CRASH_CTL:
1273 return hv_vcpu->cpuid_cache.features_edx &
1274 HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1275 case HV_X64_MSR_SYNDBG_OPTIONS:
1276 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1277 return hv_vcpu->cpuid_cache.features_edx &
1278 HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1279 default:
1280 break;
1281 }
1282
1283 return false;
1284}
1285
1286static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1287 bool host)
1288{
1289 struct kvm *kvm = vcpu->kvm;
1290 struct kvm_hv *hv = to_kvm_hv(kvm);
1291
1292 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1293 return 1;
1294
1295 switch (msr) {
1296 case HV_X64_MSR_GUEST_OS_ID:
1297 hv->hv_guest_os_id = data;
		/* Setting the guest OS ID to zero disables the hypercall page. */
1299 if (!hv->hv_guest_os_id)
1300 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1301 break;
1302 case HV_X64_MSR_HYPERCALL: {
1303 u8 instructions[9];
1304 int i = 0;
1305 u64 addr;
1306
		/* If the guest OS ID is not set, the hypercall page stays disabled. */
1308 if (!hv->hv_guest_os_id)
1309 break;
1310 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1311 hv->hv_hypercall = data;
1312 break;
1313 }

		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting bit 31 of EAX which
		 * is RsvdZ in the 32-bit Hyper-V hypercall ABI and is simply
		 * clobbered on 64-bit.
		 */
1321 if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
1323 instructions[i++] = 0x0d;
1324 instructions[i++] = 0x00;
1325 instructions[i++] = 0x00;
1326 instructions[i++] = 0x00;
1327 instructions[i++] = 0x80;
1328 }
1329
		/* vmcall/vmmcall */
1331 static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
1332 i += 3;
1333
		/* ret */
1335 ((unsigned char *)instructions)[i++] = 0xc3;
1336
1337 addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
1338 if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1339 return 1;
1340 hv->hv_hypercall = data;
1341 break;
1342 }
1343 case HV_X64_MSR_REFERENCE_TSC:
1344 hv->hv_tsc_page = data;
1345 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1346 if (!host)
1347 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1348 else
1349 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1350 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1351 } else {
1352 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1353 }
1354 break;
1355 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1356 return kvm_hv_msr_set_crash_data(kvm,
1357 msr - HV_X64_MSR_CRASH_P0,
1358 data);
1359 case HV_X64_MSR_CRASH_CTL:
1360 if (host)
1361 return kvm_hv_msr_set_crash_ctl(kvm, data);
1362
1363 if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
1364 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
1365 hv->hv_crash_param[0],
1366 hv->hv_crash_param[1],
1367 hv->hv_crash_param[2],
1368 hv->hv_crash_param[3],
1369 hv->hv_crash_param[4]);
1370
			/* Send notification about crash to user space */
1372 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
1373 }
1374 break;
1375 case HV_X64_MSR_RESET:
1376 if (data == 1) {
1377 vcpu_debug(vcpu, "hyper-v reset requested\n");
1378 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1379 }
1380 break;
1381 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1382 hv->hv_reenlightenment_control = data;
1383 break;
1384 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1385 hv->hv_tsc_emulation_control = data;
1386 break;
1387 case HV_X64_MSR_TSC_EMULATION_STATUS:
1388 if (data && !host)
1389 return 1;
1390
1391 hv->hv_tsc_emulation_status = data;
1392 break;
1393 case HV_X64_MSR_TIME_REF_COUNT:
		/* Read-only, but still ignore it if host-initiated. */
1395 if (!host)
1396 return 1;
1397 break;
1398 case HV_X64_MSR_SYNDBG_OPTIONS:
1399 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1400 return syndbg_set_msr(vcpu, msr, data, host);
1401 default:
1402 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1403 msr, data);
1404 return 1;
1405 }
1406 return 0;
1407}
1408
/* Calculate CPU time spent by the current task in 100ns units. */
1410static u64 current_task_runtime_100ns(void)
1411{
1412 u64 utime, stime;
1413
1414 task_cputime_adjusted(current, &utime, &stime);
1415
1416 return div_u64(utime + stime, 100);
1417}
1418
1419static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1420{
1421 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1422
1423 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1424 return 1;
1425
1426 switch (msr) {
1427 case HV_X64_MSR_VP_INDEX: {
1428 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1429 int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1430 u32 new_vp_index = (u32)data;
1431
1432 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1433 return 1;
1434
1435 if (new_vp_index == hv_vcpu->vp_index)
1436 return 0;
1437
		/*
		 * VP_INDEX is initialized to the vCPU index in
		 * kvm_hv_vcpu_init() so the two initially match.  The VP index
		 * is changing here: adjust num_mismatched_vp_indexes depending
		 * on whether it now matches or no longer matches vcpu_idx.
		 */
1444 if (hv_vcpu->vp_index == vcpu_idx)
1445 atomic_inc(&hv->num_mismatched_vp_indexes);
1446 else if (new_vp_index == vcpu_idx)
1447 atomic_dec(&hv->num_mismatched_vp_indexes);
1448
1449 hv_vcpu->vp_index = new_vp_index;
1450 break;
1451 }
1452 case HV_X64_MSR_VP_ASSIST_PAGE: {
1453 u64 gfn;
1454 unsigned long addr;
1455
1456 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1457 hv_vcpu->hv_vapic = data;
1458 if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1459 return 1;
1460 break;
1461 }
1462 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1463 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1464 if (kvm_is_error_hva(addr))
1465 return 1;
1466
		/*
		 * Clear only the apic_assist portion of struct
		 * hv_vp_assist_page; there can be valuable data in the rest of
		 * the page which needs to be preserved, e.g. on migration.
		 */
1472 if (__put_user(0, (u32 __user *)addr))
1473 return 1;
1474 hv_vcpu->hv_vapic = data;
1475 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1476 if (kvm_lapic_enable_pv_eoi(vcpu,
1477 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1478 sizeof(struct hv_vp_assist_page)))
1479 return 1;
1480 break;
1481 }
1482 case HV_X64_MSR_EOI:
1483 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1484 case HV_X64_MSR_ICR:
1485 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1486 case HV_X64_MSR_TPR:
1487 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1488 case HV_X64_MSR_VP_RUNTIME:
1489 if (!host)
1490 return 1;
1491 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1492 break;
1493 case HV_X64_MSR_SCONTROL:
1494 case HV_X64_MSR_SVERSION:
1495 case HV_X64_MSR_SIEFP:
1496 case HV_X64_MSR_SIMP:
1497 case HV_X64_MSR_EOM:
1498 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1499 return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1500 case HV_X64_MSR_STIMER0_CONFIG:
1501 case HV_X64_MSR_STIMER1_CONFIG:
1502 case HV_X64_MSR_STIMER2_CONFIG:
1503 case HV_X64_MSR_STIMER3_CONFIG: {
1504 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1505
1506 return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1507 data, host);
1508 }
1509 case HV_X64_MSR_STIMER0_COUNT:
1510 case HV_X64_MSR_STIMER1_COUNT:
1511 case HV_X64_MSR_STIMER2_COUNT:
1512 case HV_X64_MSR_STIMER3_COUNT: {
1513 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1514
1515 return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1516 data, host);
1517 }
1518 case HV_X64_MSR_TSC_FREQUENCY:
1519 case HV_X64_MSR_APIC_FREQUENCY:
		/* Read-only, but still ignore it if host-initiated. */
1521 if (!host)
1522 return 1;
1523 break;
1524 default:
1525 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1526 msr, data);
1527 return 1;
1528 }
1529
1530 return 0;
1531}
1532
1533static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1534 bool host)
1535{
1536 u64 data = 0;
1537 struct kvm *kvm = vcpu->kvm;
1538 struct kvm_hv *hv = to_kvm_hv(kvm);
1539
1540 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1541 return 1;
1542
1543 switch (msr) {
1544 case HV_X64_MSR_GUEST_OS_ID:
1545 data = hv->hv_guest_os_id;
1546 break;
1547 case HV_X64_MSR_HYPERCALL:
1548 data = hv->hv_hypercall;
1549 break;
1550 case HV_X64_MSR_TIME_REF_COUNT:
1551 data = get_time_ref_counter(kvm);
1552 break;
1553 case HV_X64_MSR_REFERENCE_TSC:
1554 data = hv->hv_tsc_page;
1555 break;
1556 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1557 return kvm_hv_msr_get_crash_data(kvm,
1558 msr - HV_X64_MSR_CRASH_P0,
1559 pdata);
1560 case HV_X64_MSR_CRASH_CTL:
1561 return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1562 case HV_X64_MSR_RESET:
1563 data = 0;
1564 break;
1565 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1566 data = hv->hv_reenlightenment_control;
1567 break;
1568 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1569 data = hv->hv_tsc_emulation_control;
1570 break;
1571 case HV_X64_MSR_TSC_EMULATION_STATUS:
1572 data = hv->hv_tsc_emulation_status;
1573 break;
1574 case HV_X64_MSR_SYNDBG_OPTIONS:
1575 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1576 return syndbg_get_msr(vcpu, msr, pdata, host);
1577 default:
1578 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1579 return 1;
1580 }
1581
1582 *pdata = data;
1583 return 0;
1584}
1585
1586static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1587 bool host)
1588{
1589 u64 data = 0;
1590 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1591
1592 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1593 return 1;
1594
1595 switch (msr) {
1596 case HV_X64_MSR_VP_INDEX:
1597 data = hv_vcpu->vp_index;
1598 break;
1599 case HV_X64_MSR_EOI:
1600 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1601 case HV_X64_MSR_ICR:
1602 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1603 case HV_X64_MSR_TPR:
1604 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1605 case HV_X64_MSR_VP_ASSIST_PAGE:
1606 data = hv_vcpu->hv_vapic;
1607 break;
1608 case HV_X64_MSR_VP_RUNTIME:
1609 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1610 break;
1611 case HV_X64_MSR_SCONTROL:
1612 case HV_X64_MSR_SVERSION:
1613 case HV_X64_MSR_SIEFP:
1614 case HV_X64_MSR_SIMP:
1615 case HV_X64_MSR_EOM:
1616 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1617 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1618 case HV_X64_MSR_STIMER0_CONFIG:
1619 case HV_X64_MSR_STIMER1_CONFIG:
1620 case HV_X64_MSR_STIMER2_CONFIG:
1621 case HV_X64_MSR_STIMER3_CONFIG: {
1622 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1623
1624 return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1625 pdata);
1626 }
1627 case HV_X64_MSR_STIMER0_COUNT:
1628 case HV_X64_MSR_STIMER1_COUNT:
1629 case HV_X64_MSR_STIMER2_COUNT:
1630 case HV_X64_MSR_STIMER3_COUNT: {
1631 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1632
1633 return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1634 pdata);
1635 }
1636 case HV_X64_MSR_TSC_FREQUENCY:
1637 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1638 break;
1639 case HV_X64_MSR_APIC_FREQUENCY:
1640 data = APIC_BUS_FREQUENCY;
1641 break;
1642 default:
1643 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1644 return 1;
1645 }
1646 *pdata = data;
1647 return 0;
1648}
1649
1650int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1651{
1652 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1653
1654 if (!host && !vcpu->arch.hyperv_enabled)
1655 return 1;
1656
1657 if (!to_hv_vcpu(vcpu)) {
1658 if (kvm_hv_vcpu_init(vcpu))
1659 return 1;
1660 }
1661
1662 if (kvm_hv_msr_partition_wide(msr)) {
1663 int r;
1664
1665 mutex_lock(&hv->hv_lock);
1666 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1667 mutex_unlock(&hv->hv_lock);
1668 return r;
1669 } else
1670 return kvm_hv_set_msr(vcpu, msr, data, host);
1671}
1672
1673int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1674{
1675 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1676
1677 if (!host && !vcpu->arch.hyperv_enabled)
1678 return 1;
1679
1680 if (!to_hv_vcpu(vcpu)) {
1681 if (kvm_hv_vcpu_init(vcpu))
1682 return 1;
1683 }
1684
1685 if (kvm_hv_msr_partition_wide(msr)) {
1686 int r;
1687
1688 mutex_lock(&hv->hv_lock);
1689 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1690 mutex_unlock(&hv->hv_lock);
1691 return r;
1692 } else
1693 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1694}
1695
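/*
 * Convert the Hyper-V sparse VP set (64-bit banks selected by valid_bank_mask)
 * into a mask of vCPUs.  When every vCPU's VP index matches its vCPU index the
 * VP bitmap can be returned directly; otherwise translate it bit by bit.
 */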
1696static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1697 struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1698 u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1699{
1700 struct kvm_hv *hv = to_kvm_hv(kvm);
1701 struct kvm_vcpu *vcpu;
1702 int i, bank, sbank = 0;
1703
1704 memset(vp_bitmap, 0,
1705 KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
1706 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1707 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1708 vp_bitmap[bank] = sparse_banks[sbank++];
1709
1710 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
		/* VP index equals the vCPU index for every vCPU, use vp_bitmap directly. */
1712 return (unsigned long *)vp_bitmap;
1713 }
1714
1715 bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1716 kvm_for_each_vcpu(i, vcpu, kvm) {
1717 if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1718 __set_bit(i, vcpu_bitmap);
1719 }
1720 return vcpu_bitmap;
1721}
1722
1723struct kvm_hv_hcall {
1724 u64 param;
1725 u64 ingpa;
1726 u64 outgpa;
1727 u16 code;
1728 u16 rep_cnt;
1729 u16 rep_idx;
1730 bool fast;
1731 bool rep;
1732 sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1733};
1734
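/*
 * Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_{SPACE,LIST}{,_EX}.  Parameters come
 * either from guest memory or, for "fast" calls, from XMM registers.  The
 * address list is not inspected: the affected vCPUs simply get a full guest
 * TLB flush request.
 */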
1735static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
1736{
1737 int i;
1738 gpa_t gpa;
1739 struct kvm *kvm = vcpu->kvm;
1740 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1741 struct hv_tlb_flush_ex flush_ex;
1742 struct hv_tlb_flush flush;
1743 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1744 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1745 unsigned long *vcpu_mask;
1746 u64 valid_bank_mask;
1747 u64 sparse_banks[64];
1748 int sparse_banks_len;
1749 bool all_cpus;
1750
1751 if (!ex) {
1752 if (hc->fast) {
1753 flush.address_space = hc->ingpa;
1754 flush.flags = hc->outgpa;
1755 flush.processor_mask = sse128_lo(hc->xmm[0]);
1756 } else {
1757 if (unlikely(kvm_read_guest(kvm, hc->ingpa,
1758 &flush, sizeof(flush))))
1759 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1760 }
1761
1762 trace_kvm_hv_flush_tlb(flush.processor_mask,
1763 flush.address_space, flush.flags);
1764
1765 valid_bank_mask = BIT_ULL(0);
1766 sparse_banks[0] = flush.processor_mask;
1767
		/*
		 * Work around a possible WS2012 bug: it sends hypercalls with
		 * processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't.  Treat processor_mask == 0 the same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
1775 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1776 flush.processor_mask == 0;
1777 } else {
1778 if (hc->fast) {
1779 flush_ex.address_space = hc->ingpa;
1780 flush_ex.flags = hc->outgpa;
1781 memcpy(&flush_ex.hv_vp_set,
1782 &hc->xmm[0], sizeof(hc->xmm[0]));
1783 } else {
1784 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
1785 sizeof(flush_ex))))
1786 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1787 }
1788
1789 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1790 flush_ex.hv_vp_set.format,
1791 flush_ex.address_space,
1792 flush_ex.flags);
1793
1794 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1795 all_cpus = flush_ex.hv_vp_set.format !=
1796 HV_GENERIC_SET_SPARSE_4K;
1797
1798 sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
1799
1800 if (!sparse_banks_len && !all_cpus)
1801 goto ret_success;
1802
1803 if (!all_cpus) {
1804 if (hc->fast) {
1805 if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
1806 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1807 for (i = 0; i < sparse_banks_len; i += 2) {
1808 sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
1809 sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
1810 }
1811 } else {
1812 gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
1813 hv_vp_set.bank_contents);
1814 if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
1815 sparse_banks_len *
1816 sizeof(sparse_banks[0]))))
1817 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1818 }
1819 }
1820 }
1821
1822 cpumask_clear(&hv_vcpu->tlb_flush);
1823
1824 vcpu_mask = all_cpus ? NULL :
1825 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1826 vp_bitmap, vcpu_bitmap);
1827
	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
	 * analyze it here, flush TLB regardless of the specified address space.
	 */
1832 kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
1833 NULL, vcpu_mask, &hv_vcpu->tlb_flush);
1834
1835ret_success:
	/* We always do a full TLB flush, so set 'Reps completed' = 'Rep Count'. */
1837 return (u64)HV_STATUS_SUCCESS |
1838 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1839}
1840
1841static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1842 unsigned long *vcpu_bitmap)
1843{
1844 struct kvm_lapic_irq irq = {
1845 .delivery_mode = APIC_DM_FIXED,
1846 .vector = vector
1847 };
1848 struct kvm_vcpu *vcpu;
1849 int i;
1850
1851 kvm_for_each_vcpu(i, vcpu, kvm) {
1852 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1853 continue;
1854
		/* We fail only when APIC is disabled */
1856 kvm_apic_set_irq(vcpu, &irq, NULL);
1857 }
1858}
1859
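/*
 * Handle HVCALL_SEND_IPI{,_EX}: decode the vector and target VP set (fixed
 * 64-bit mask or sparse banks) and inject the fixed-mode IPI into the
 * in-kernel local APICs of the targeted vCPUs.
 */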
1860static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
1861{
1862 struct kvm *kvm = vcpu->kvm;
1863 struct hv_send_ipi_ex send_ipi_ex;
1864 struct hv_send_ipi send_ipi;
1865 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1866 DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1867 unsigned long *vcpu_mask;
1868 unsigned long valid_bank_mask;
1869 u64 sparse_banks[64];
1870 int sparse_banks_len;
1871 u32 vector;
1872 bool all_cpus;
1873
1874 if (!ex) {
1875 if (!hc->fast) {
1876 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
1877 sizeof(send_ipi))))
1878 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1879 sparse_banks[0] = send_ipi.cpu_mask;
1880 vector = send_ipi.vector;
1881 } else {
			/* The 'reserved' part of hv_send_ipi should be 0. */
1883 if (unlikely(hc->ingpa >> 32 != 0))
1884 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1885 sparse_banks[0] = hc->outgpa;
1886 vector = (u32)hc->ingpa;
1887 }
1888 all_cpus = false;
1889 valid_bank_mask = BIT_ULL(0);
1890
1891 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1892 } else {
1893 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
1894 sizeof(send_ipi_ex))))
1895 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1896
1897 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1898 send_ipi_ex.vp_set.format,
1899 send_ipi_ex.vp_set.valid_bank_mask);
1900
1901 vector = send_ipi_ex.vector;
1902 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1903 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1904 sizeof(sparse_banks[0]);
1905
1906 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1907
1908 if (!sparse_banks_len)
1909 goto ret_success;
1910
1911 if (!all_cpus &&
1912 kvm_read_guest(kvm,
1913 hc->ingpa + offsetof(struct hv_send_ipi_ex,
1914 vp_set.bank_contents),
1915 sparse_banks,
1916 sparse_banks_len))
1917 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1918 }
1919
1920 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1921 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1922
1923 vcpu_mask = all_cpus ? NULL :
1924 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1925 vp_bitmap, vcpu_bitmap);
1926
1927 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1928
1929ret_success:
1930 return HV_STATUS_SUCCESS;
1931}
1932
1933void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
1934{
1935 struct kvm_cpuid_entry2 *entry;
1936 struct kvm_vcpu_hv *hv_vcpu;
1937
1938 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
1939 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
1940 vcpu->arch.hyperv_enabled = true;
1941 } else {
1942 vcpu->arch.hyperv_enabled = false;
1943 return;
1944 }
1945
1946 if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
1947 return;
1948
1949 hv_vcpu = to_hv_vcpu(vcpu);
1950
1951 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
1952 if (entry) {
1953 hv_vcpu->cpuid_cache.features_eax = entry->eax;
1954 hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
1955 hv_vcpu->cpuid_cache.features_edx = entry->edx;
1956 } else {
1957 hv_vcpu->cpuid_cache.features_eax = 0;
1958 hv_vcpu->cpuid_cache.features_ebx = 0;
1959 hv_vcpu->cpuid_cache.features_edx = 0;
1960 }
1961
1962 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
1963 if (entry) {
1964 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
1965 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
1966 } else {
1967 hv_vcpu->cpuid_cache.enlightenments_eax = 0;
1968 hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
1969 }
1970
1971 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
1972 if (entry)
1973 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
1974 else
1975 hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
1976}
1977
1978int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
1979{
1980 struct kvm_vcpu_hv *hv_vcpu;
1981 int ret = 0;
1982
1983 if (!to_hv_vcpu(vcpu)) {
1984 if (enforce) {
1985 ret = kvm_hv_vcpu_init(vcpu);
1986 if (ret)
1987 return ret;
1988 } else {
1989 return 0;
1990 }
1991 }
1992
1993 hv_vcpu = to_hv_vcpu(vcpu);
1994 hv_vcpu->enforce_cpuid = enforce;
1995
1996 return ret;
1997}
1998
1999bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
2000{
2001 return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
2002}
2003
2004static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
2005{
2006 bool longmode;
2007
2008 longmode = is_64_bit_mode(vcpu);
2009 if (longmode)
2010 kvm_rax_write(vcpu, result);
2011 else {
2012 kvm_rdx_write(vcpu, result >> 32);
2013 kvm_rax_write(vcpu, result & 0xffffffff);
2014 }
2015}
2016
2017static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
2018{
2019 trace_kvm_hv_hypercall_done(result);
2020 kvm_hv_hypercall_set_result(vcpu, result);
2021 ++vcpu->stat.hypercalls;
2022 return kvm_skip_emulated_instruction(vcpu);
2023}
2024
2025static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
2026{
2027 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
2028}
2029
2030static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2031{
2032 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2033 struct eventfd_ctx *eventfd;
2034
2035 if (unlikely(!hc->fast)) {
2036 int ret;
2037 gpa_t gpa = hc->ingpa;
2038
2039 if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2040 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2041 return HV_STATUS_INVALID_ALIGNMENT;
2042
2043 ret = kvm_vcpu_read_guest(vcpu, gpa,
2044 &hc->ingpa, sizeof(hc->ingpa));
2045 if (ret < 0)
2046 return HV_STATUS_INVALID_ALIGNMENT;
2047 }
2048
	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known use cases it is zero, so just
	 * report lookup failure if it isn't.
	 */
2054 if (hc->ingpa & 0xffff00000000ULL)
2055 return HV_STATUS_INVALID_PORT_ID;
	/* The remaining bits outside the connection ID are reserved-zero. */
2057 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2058 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2059
	/* The conn_to_evt IDR is searched under RCU. */
2061 rcu_read_lock();
2062 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2063 rcu_read_unlock();
2064 if (!eventfd)
2065 return HV_STATUS_INVALID_PORT_ID;
2066
2067 eventfd_signal(eventfd, 1);
2068 return HV_STATUS_SUCCESS;
2069}
2070
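/*
 * Only the TLB flush hypercalls consume "fast" input from XMM registers here;
 * for those, the XMM state is snapshotted up front in kvm_hv_hypercall().
 */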
2071static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
2072{
2073 switch (hc->code) {
2074 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2075 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2076 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2077 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2078 return true;
2079 }
2080
2081 return false;
2082}
2083
2084static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
2085{
2086 int reg;
2087
2088 kvm_fpu_get();
2089 for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
2090 _kvm_read_sse_reg(reg, &hc->xmm[reg]);
2091 kvm_fpu_put();
2092}
2093
2094static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
2095{
2096 if (!hv_vcpu->enforce_cpuid)
2097 return true;
2098
2099 switch (code) {
2100 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2101 return hv_vcpu->cpuid_cache.enlightenments_ebx &&
2102 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
2103 case HVCALL_POST_MESSAGE:
2104 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2105 case HVCALL_SIGNAL_EVENT:
2106 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2107 case HVCALL_POST_DEBUG_DATA:
2108 case HVCALL_RETRIEVE_DEBUG_DATA:
2109 case HVCALL_RESET_DEBUG_SESSION:
		/*
		 * Return 'true' when SynDBG is disabled so that the hypercall
		 * falls through to the handler, which then returns
		 * HV_STATUS_INVALID_HYPERCALL_CODE.
		 */
2114 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2115 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2116 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2117 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2118 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2119 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2120 return false;
2121 fallthrough;
2122 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2123 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2124 return hv_vcpu->cpuid_cache.enlightenments_eax &
2125 HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2126 case HVCALL_SEND_IPI_EX:
2127 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2128 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2129 return false;
2130 fallthrough;
2131 case HVCALL_SEND_IPI:
2132 return hv_vcpu->cpuid_cache.enlightenments_eax &
2133 HV_X64_CLUSTER_IPI_RECOMMENDED;
2134 default:
2135 break;
2136 }
2137
2138 return true;
2139}
2140
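/*
 * Top-level hypercall dispatcher.  Per the Hyper-V hypercall ABI used here,
 * 64-bit guests pass the input value in RCX and the input/output GPAs in RDX
 * and R8, while 32-bit guests use the EDX:EAX, EBX:ECX and EDI:ESI pairs; the
 * result is returned in RAX (or EDX:EAX) by kvm_hv_hypercall_set_result().
 */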
2141int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
2142{
2143 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2144 struct kvm_hv_hcall hc;
2145 u64 ret = HV_STATUS_SUCCESS;
2146
	/*
	 * Per the Hyper-V spec, a hypercall generates #UD when issued from
	 * CPL != 0 or from real mode.
	 */
2151 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
2152 kvm_queue_exception(vcpu, UD_VECTOR);
2153 return 1;
2154 }
2155
2156#ifdef CONFIG_X86_64
2157 if (is_64_bit_mode(vcpu)) {
2158 hc.param = kvm_rcx_read(vcpu);
2159 hc.ingpa = kvm_rdx_read(vcpu);
2160 hc.outgpa = kvm_r8_read(vcpu);
2161 } else
2162#endif
2163 {
2164 hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2165 (kvm_rax_read(vcpu) & 0xffffffff);
2166 hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2167 (kvm_rcx_read(vcpu) & 0xffffffff);
2168 hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2169 (kvm_rsi_read(vcpu) & 0xffffffff);
2170 }
2171
2172 hc.code = hc.param & 0xffff;
2173 hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
2174 hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
2175 hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
2176 hc.rep = !!(hc.rep_cnt || hc.rep_idx);
2177
2178 trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
2179 hc.ingpa, hc.outgpa);

        if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
                ret = HV_STATUS_ACCESS_DENIED;
                goto hypercall_complete;
        }

        if (hc.fast && is_xmm_fast_hypercall(&hc)) {
                if (unlikely(hv_vcpu->enforce_cpuid &&
                             !(hv_vcpu->cpuid_cache.features_edx &
                               HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
                        kvm_queue_exception(vcpu, UD_VECTOR);
                        return 1;
                }

                kvm_hv_hypercall_read_xmm(&hc);
        }

        switch (hc.code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                kvm_vcpu_on_spin(vcpu, true);
                break;
        case HVCALL_SIGNAL_EVENT:
                if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hvcall_signal_event(vcpu, &hc);
                if (ret != HV_STATUS_INVALID_PORT_ID)
                        break;
                fallthrough;    /* maybe userspace knows this conn_id */
        case HVCALL_POST_MESSAGE:
                /* don't bother userspace if it has no way to handle it */
                if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = hc.param;
                vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
                if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, &hc, false);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
                if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, &hc, false);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
                if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, &hc, true);
                break;
        case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
                if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_flush_tlb(vcpu, &hc, true);
                break;
        case HVCALL_SEND_IPI:
                if (unlikely(hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_send_ipi(vcpu, &hc, false);
                break;
        case HVCALL_SEND_IPI_EX:
                if (unlikely(hc.fast || hc.rep)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
                        break;
                }
                ret = kvm_hv_send_ipi(vcpu, &hc, true);
                break;
        case HVCALL_POST_DEBUG_DATA:
        case HVCALL_RETRIEVE_DEBUG_DATA:
                if (unlikely(hc.fast)) {
                        ret = HV_STATUS_INVALID_PARAMETER;
                        break;
                }
                fallthrough;
        case HVCALL_RESET_DEBUG_SESSION: {
                struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

                if (!kvm_hv_is_syndbg_enabled(vcpu)) {
                        ret = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                }

                if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
                        ret = HV_STATUS_OPERATION_DENIED;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = hc.param;
                vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;
                return 0;
        }
        default:
                ret = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

hypercall_complete:
        return kvm_hv_hypercall_complete(vcpu, ret);
}

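/*
 * Set up per-VM Hyper-V state: the hv_lock mutex and the conn_id -> eventfd
 * map used by HVCALL_SIGNAL_EVENT.
 */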
void kvm_hv_init_vm(struct kvm *kvm)
{
        struct kvm_hv *hv = to_kvm_hv(kvm);

        mutex_init(&hv->hv_lock);
        idr_init(&hv->conn_to_evt);
}

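/* Drop every eventfd reference left in the conn_id map when the VM goes away. */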
void kvm_hv_destroy_vm(struct kvm *kvm)
{
        struct kvm_hv *hv = to_kvm_hv(kvm);
        struct eventfd_ctx *eventfd;
        int i;

        idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
                eventfd_ctx_put(eventfd);
        idr_destroy(&hv->conn_to_evt);
}

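/*
 * Associate an eventfd with a Hyper-V connection id so HVCALL_SIGNAL_EVENT
 * on that id can be completed without exiting to userspace.
 */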
static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
        struct kvm_hv *hv = to_kvm_hv(kvm);
        struct eventfd_ctx *eventfd;
        int ret;

        eventfd = eventfd_ctx_fdget(fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&hv->hv_lock);
        ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
                        GFP_KERNEL_ACCOUNT);
        mutex_unlock(&hv->hv_lock);

        if (ret >= 0)
                return 0;

        if (ret == -ENOSPC)
                ret = -EEXIST;
        eventfd_ctx_put(eventfd);
        return ret;
}

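/*
 * Remove the eventfd for a connection id; synchronize_srcu() waits for any
 * vCPU that may still be signalling it before the reference is dropped.
 */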
static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
        struct kvm_hv *hv = to_kvm_hv(kvm);
        struct eventfd_ctx *eventfd;

        mutex_lock(&hv->hv_lock);
        eventfd = idr_remove(&hv->conn_to_evt, conn_id);
        mutex_unlock(&hv->hv_lock);

        if (!eventfd)
                return -ENOENT;

        synchronize_srcu(&kvm->srcu);
        eventfd_ctx_put(eventfd);
        return 0;
}

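/* KVM_HYPERV_EVENTFD ioctl: validate the arguments and (de)assign the eventfd. */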
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
        if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
            (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
                return -EINVAL;

        if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
                return kvm_hv_eventfd_deassign(kvm, args->conn_id);
        return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}

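/*
 * Report the Hyper-V CPUID leaves KVM supports to userspace; the
 * nested-features leaf is only included when enlightened VMCS is available.
 */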
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
                     struct kvm_cpuid_entry2 __user *entries)
{
        uint16_t evmcs_ver = 0;
        struct kvm_cpuid_entry2 cpuid_entries[] = {
                { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
                { .function = HYPERV_CPUID_INTERFACE },
                { .function = HYPERV_CPUID_VERSION },
                { .function = HYPERV_CPUID_FEATURES },
                { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
                { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
                { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
                { .function = HYPERV_CPUID_SYNDBG_INTERFACE },
                { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
                { .function = HYPERV_CPUID_NESTED_FEATURES },
        };
        int i, nent = ARRAY_SIZE(cpuid_entries);

        if (kvm_x86_ops.nested_ops->get_evmcs_version)
                evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

        /* Skip NESTED_FEATURES if eVMCS is not supported */
        if (!evmcs_ver)
                --nent;

        if (cpuid->nent < nent)
                return -E2BIG;

        if (cpuid->nent > nent)
                cpuid->nent = nent;

        for (i = 0; i < nent; i++) {
                struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
                u32 signature[3];

                switch (ent->function) {
                case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
                        memcpy(signature, "Linux KVM Hv", 12);

                        ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
                        ent->ebx = signature[0];
                        ent->ecx = signature[1];
                        ent->edx = signature[2];
                        break;

                case HYPERV_CPUID_INTERFACE:
                        ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
                        break;

                case HYPERV_CPUID_VERSION:
                        /*
                         * We implement some Hyper-V 2016 functions, so
                         * advertise that version (build 14393).
                         */
                        ent->eax = 0x00003839;
                        ent->ebx = 0x000A0000;
                        break;

                case HYPERV_CPUID_FEATURES:
                        ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
                        ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
                        ent->eax |= HV_MSR_SYNIC_AVAILABLE;
                        ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
                        ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
                        ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
                        ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
                        ent->eax |= HV_MSR_RESET_AVAILABLE;
                        ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
                        ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
                        ent->eax |= HV_ACCESS_REENLIGHTENMENT;

                        ent->ebx |= HV_POST_MESSAGES;
                        ent->ebx |= HV_SIGNAL_EVENTS;

                        ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
                        ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
                        ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

                        ent->ebx |= HV_DEBUGGING;
                        ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
                        ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;

                        /*
                         * Direct Synthetic timers only make sense with an
                         * in-kernel LAPIC.
                         */
                        if (!vcpu || lapic_in_kernel(vcpu))
                                ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

                        break;

                case HYPERV_CPUID_ENLIGHTMENT_INFO:
                        ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
                        ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
                        ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
                        ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
                        ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
                        if (evmcs_ver)
                                ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
                        if (!cpu_smt_possible())
                                ent->eax |= HV_X64_NO_NONARCH_CORESHARING;

                        /*
                         * Default number of spinlock retry attempts, matches
                         * Hyper-V 2016.
                         */
                        ent->ebx = 0x00000FFF;

                        break;

                case HYPERV_CPUID_IMPLEMENT_LIMITS:
                        /* Maximum number of virtual processors */
                        ent->eax = KVM_MAX_VCPUS;
                        /*
                         * Maximum number of logical processors, matches
                         * Hyper-V 2016.
                         */
                        ent->ebx = 64;

                        break;

                case HYPERV_CPUID_NESTED_FEATURES:
                        ent->eax = evmcs_ver;

                        break;

                case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
                        memcpy(signature, "Linux KVM Hv", 12);

                        ent->eax = 0;
                        ent->ebx = signature[0];
                        ent->ecx = signature[1];
                        ent->edx = signature[2];
                        break;

                case HYPERV_CPUID_SYNDBG_INTERFACE:
                        memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
                        ent->eax = signature[0];
                        break;

                case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
                        ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
                        break;

                default:
                        break;
                }
        }

        if (copy_to_user(entries, cpuid_entries,
                         nent * sizeof(struct kvm_cpuid_entry2)))
                return -EFAULT;

        return 0;
}