/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

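/*
 * Bits 16..31 of kvm_memory_region::flags are internal to KVM and are not
 * part of the userspace-visible flags defined in include/uapi/linux/kvm.h.
 */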
#define KVM_MEMSLOT_INVALID (1UL << 16)

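/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. it is temporarily set while a memslot update is in flight.  This
 * gives cached memslot data (such as MMIO lookups) a generation value that
 * can be recognized as potentially stale.
 */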
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63)

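/* Two fragments for cross MMIO pages. */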
#define KVM_MAX_MMIO_FRAGMENTS 2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM 1
#endif

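/*
 * For the normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
 * mask bit 63 to indicate the noslot pfn.
 */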
#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)

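/*
 * error pfns indicate that the gfn is in slot but the host failed to
 * translate it to a pfn.
 */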
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

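/*
 * error_noslot pfns indicate that the gfn can not be translated to a pfn:
 * either the gfn is not in any slot, or it is in a slot but translation
 * to a pfn failed.
 */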
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

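/* noslot pfns indicate that the gfn is not in any slot. */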
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}

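/*
 * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva
 */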
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)

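/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */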
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
        BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
        (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
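
/*
 * Architectures define their own requests on top of the common bits,
 * e.g. (illustrative only, not a real request):
 *
 *      #define KVM_REQ_EXAMPLE KVM_ARCH_REQ(0)
 *
 * and raise them with kvm_make_request(KVM_REQ_EXAMPLE, vcpu).
 */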

#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_FAST_MMIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
                            gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
                    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                         gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gpa_t cr2_or_gpa;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        bool wakeup_all;
        bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
        struct kvm_memory_slot *slot;
        gfn_t start;
        gfn_t end;
        pte_t pte;
        bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
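        /*
         * Only valid if the 'pfn' is managed by the host kernel (i.e. there
         * is a 'struct page' for it).  With mem= some memory can be used as
         * guest memory without being managed by the host kernel; for such
         * pfns this field is initialized to KVM_UNMAPPED_PAGE.
         */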
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
        kvm_pfn_t gfn;
};

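/*
 * Used to check whether a mapping established by kvm_vcpu_map() is valid;
 * an hva of NULL means "not mapped".
 */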
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
        return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
        return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

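/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */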
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int vcpu_idx;
        int srcu_idx;
        int mode;
        u64 requests;
        unsigned long guest_debug;

        int pre_pcpu;
        struct list_head blocked_vcpu_list;

        struct mutex mutex;
        struct kvm_run *run;

        struct rcuwait wait;
        struct pid __rcu *pid;
        int sigset_active;
        sigset_t sigset;
        unsigned int halt_poll_ns;
        bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
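        /*
         * Cpu relax intercept or pause loop exit optimization
         * in_spin_loop: set when a vcpu does a pause loop exit
         *  or cpu relax intercepted.
         * dy_eligible: indicates whether vcpu is eligible for directed yield.
         */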
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        bool ready;
        struct kvm_vcpu_arch arch;
        struct kvm_vcpu_stat stat;
        char stats_id[KVM_STATS_NAME_SIZE];
        struct kvm_dirty_ring dirty_ring;
};

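/* must be called with irqs disabled */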
static __always_inline void guest_enter_irqoff(void)
{
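        /*
         * This is running in ioctl context so it is safe to assume that
         * it is the stime pending cputime to flush.
         */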
        instrumentation_begin();
        vtime_account_guest_enter();
        instrumentation_end();

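        /*
         * KVM does not hold any references to rcu protected data when it
         * switches CPU into a guest mode. In fact switching to a guest mode
         * is very similar to exiting to userspace from rcu point of view. In
         * addition CPU may stay in a guest mode for quite a long time (up to
         * one time slice). Lets treat guest mode as quiescent state, just like
         * we do with user-mode execution.
         */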
        if (!context_tracking_guest_enter()) {
                instrumentation_begin();
                rcu_virt_note_context_switch(smp_processor_id());
                instrumentation_end();
        }
}

static __always_inline void guest_exit_irqoff(void)
{
        context_tracking_guest_exit();

        instrumentation_begin();
        /* Flush the guest cputime we spent on the guest */
        vtime_account_guest_exit();
        instrumentation_end();
}

static inline void guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit_irqoff();
        local_irq_restore(flags);
}

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        /*
         * The memory barrier ensures a previous write to vcpu->requests cannot
         * be reordered with the read of vcpu->mode.  It pairs with the general
         * memory barrier following the write of vcpu->mode in VCPU RUN.
         */
        smp_mb__before_atomic();
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
        u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
{
        return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long len = kvm_dirty_bitmap_bytes(memslot);

        return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
        u64 ind_addr;
        u64 summary_addr;
        u64 ind_offset;
        u32 summary_offset;
        u32 adapter_id;
};

struct kvm_hv_sint {
        u32 vcpu;
        u32 sint;
};

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct {
                        u32 address_lo;
                        u32 address_hi;
                        u32 data;
                        u32 flags;
                        u32 devid;
                } msi;
                struct kvm_s390_adapter_int adapter;
                struct kvm_hv_sint hv_sint;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif
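/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */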
struct kvm_memslots {
        u64 generation;
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
        atomic_t lru_slot;
        int used_slots;
        struct kvm_memory_slot memslots[];
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
        rwlock_t mmu_lock;
#else
        spinlock_t mmu_lock;
#endif

        struct mutex slots_lock;

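        /*
         * Protects the arch-specific fields of struct kvm_memory_slots in
         * use by the VM. To be used under the slots_lock (above) or in a
         * kvm->srcu critical section where acquiring the slots_lock would
         * lead to deadlock with the synchronize_srcu in
         * install_new_memslots.
         */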
        struct mutex slots_arch_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
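
        /*
         * created_vcpus is protected by kvm->lock, and is incremented
         * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
         * incremented after storing the kvm_vcpu pointer in vcpus,
         * and is accessed atomically.
         */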
        atomic_t online_vcpus;
        int created_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
                struct list_head resampler_list;
                struct mutex resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
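        /*
         * Update side is protected by irq_lock.
         */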
        struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
        unsigned long mmu_notifier_range_start;
        unsigned long mmu_notifier_range_end;
#endif
        long tlbs_dirty;
        struct list_head devices;
        u64 manual_dirty_log_protect;
        struct dentry *debugfs_dentry;
        struct kvm_stat_data **debugfs_stat_data;
        struct srcu_struct srcu;
        struct srcu_struct irq_srcu;
        pid_t userspace_pid;
        unsigned int max_halt_poll_ns;
        u32 dirty_ring_size;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
        struct notifier_block pm_notifier;
#endif
        char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
        pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
                             ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
        kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
                        (vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
        kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
        kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
                              ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
        kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
        return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
                                      lockdep_is_held(&kvm->slots_lock) ||
                                      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        int num_vcpus = atomic_read(&kvm->online_vcpus);
        i = array_index_nospec(i, num_vcpus);

        /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
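
/*
 * Typical usage (illustrative):
 *
 *      struct kvm_vcpu *vcpu;
 *      int i;
 *
 *      kvm_for_each_vcpu(i, vcpu, kvm)
 *              kvm_vcpu_kick(vcpu);
 */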

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (id < 0)
                return NULL;
        if (id < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, id);
        if (vcpu && vcpu->vcpu_id == id)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;
        return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
        return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + slots->used_slots; memslot++) \
                if (WARN_ON_ONCE(!memslot->npages)) { \
                } else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
        as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
        return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
                        lockdep_is_held(&kvm->slots_lock) ||
                        !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
        int as_id = kvm_arch_vcpu_memslots_id(vcpu);

        return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        if (index < 0)
                return NULL;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}
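/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */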
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
                          const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change);

/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                            struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
                                      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                          bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
                               bool atomic, bool *async, bool write_fault,
                               bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                 void *data, unsigned int offset,
                                 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  void *data, unsigned int offset,
                                  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v) \
({ \
        unsigned long __addr = gfn_to_hva(kvm, gfn); \
        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
        int __ret = -EFAULT; \
 \
        if (!kvm_is_error_hva(__addr)) \
                __ret = get_user(v, __uaddr); \
        __ret; \
})

#define kvm_get_guest(kvm, gpa, v) \
({ \
        gpa_t __gpa = gpa; \
        struct kvm *__kvm = kvm; \
 \
        __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
                        offset_in_page(__gpa), v); \
})

#define __kvm_put_guest(kvm, gfn, offset, v) \
({ \
        unsigned long __addr = gfn_to_hva(kvm, gfn); \
        typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
        int __ret = -EFAULT; \
 \
        if (!kvm_is_error_hva(__addr)) \
                __ret = put_user(v, __uaddr); \
        if (!__ret) \
                mark_page_dirty(kvm, gfn); \
        __ret; \
})

#define kvm_put_guest(kvm, gpa, v) \
({ \
        gpa_t __gpa = gpa; \
        struct kvm *__kvm = kvm; \
 \
        __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
                        offset_in_page(__gpa), v); \
})
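
/*
 * e.g. to copy a scalar to/from a guest physical address (illustrative):
 *
 *      u32 val;
 *
 *      if (!kvm_get_guest(kvm, gpa, val))
 *              ... val now holds the guest's u32 ...
 *      kvm_put_guest(kvm, gpa, val);
 *
 * Both return 0 on success and -EFAULT if the gpa has no valid hva.
 */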

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
                struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
                  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
                             int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                               unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
                        unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
                              int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
                         unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 struct kvm_vcpu *except,
                                 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
                                      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
                                unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
                                             unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *memslot);
#else
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
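/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */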
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
        return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
        return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
        return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.waitp;
#else
        return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
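/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ. On some architectures the virtual interrupt
 * controller is dynamically instantiated and this is not always true.
 */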
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id,
                              int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
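/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */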
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        int start = 0, end = slots->used_slots;
        int slot = atomic_read(&slots->lru_slot);
        struct kvm_memory_slot *memslots = slots->memslots;

        if (unlikely(!slots->used_slots))
                return NULL;

        if (gfn >= memslots[slot].base_gfn &&
            gfn < memslots[slot].base_gfn + memslots[slot].npages)
                return &memslots[slot];

        while (start < end) {
                slot = start + (end - start) / 2;

                if (gfn >= memslots[slot].base_gfn)
                        end = slot;
                else
                        start = slot + 1;
        }

        if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
            gfn < memslots[start].base_gfn + memslots[start].npages) {
                atomic_set(&slots->lru_slot, start);
                return &memslots[start];
        }

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
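        /*
         * The index was checked originally in search_memslots.  To avoid
         * that a malicious guest builds a Spectre gadget out of e.g. page
         * table walks, do not let the processor speculate loads outside
         * the guest's registered memslots.
         */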
        unsigned long offset = gfn - slot->base_gfn;
        offset = array_index_nospec(offset, slot->npages);
        return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
                                                gpa_t gpa)
{
        return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
        unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

        return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stat_data {
        struct kvm *kvm;
        const struct _kvm_stats_desc *desc;
        enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
        struct kvm_stats_desc desc;
        char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp) \
        .flags = type | unit | base | \
                 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
                 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
                 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
        .exponent = exp, \
        .size = 1

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp) \
        { \
                { \
                        STATS_DESC_COMMON(type, unit, base, exp), \
                        .offset = offsetof(struct kvm_vm_stat, generic.stat) \
                }, \
                .name = #stat, \
        }
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp) \
        { \
                { \
                        STATS_DESC_COMMON(type, unit, base, exp), \
                        .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
                }, \
                .name = #stat, \
        }
#define VM_STATS_DESC(stat, type, unit, base, exp) \
        { \
                { \
                        STATS_DESC_COMMON(type, unit, base, exp), \
                        .offset = offsetof(struct kvm_vm_stat, stat) \
                }, \
                .name = #stat, \
        }
#define VCPU_STATS_DESC(stat, type, unit, base, exp) \
        { \
                { \
                        STATS_DESC_COMMON(type, unit, base, exp), \
                        .offset = offsetof(struct kvm_vcpu_stat, stat) \
                }, \
                .name = #stat, \
        }

/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp) \
        SCOPE##_STATS_DESC(stat, type, unit, base, exp)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, unit, base, exponent)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, unit, base, exponent)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
        STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, unit, base, exponent)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name) \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
                KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name) \
        STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
                KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name) \
        STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
                KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanosecond */
#define STATS_DESC_TIME_NSEC(SCOPE, name) \
        STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
                KVM_STATS_BASE_POW10, -9)

#define KVM_GENERIC_VM_STATS() \
        STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush)

#define KVM_GENERIC_VCPU_STATS() \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
        STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
        STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns)

extern struct dentry *kvm_debugfs_dir;
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
                       const struct _kvm_stats_desc *desc,
                       void *stats, size_t size_stats,
                       char __user *user_buffer, size_t size, loff_t *offset);
extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
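        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock rather
         * than under kvm->mmu_lock, for scalability, so can't rely on
         * kvm->mmu_lock to keep things ordered.
         */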
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
                                         unsigned long mmu_seq,
                                         unsigned long hva)
{
        lockdep_assert_held(&kvm->mmu_lock);
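        /*
         * If mmu_notifier_count is non-zero, then the range maintained by
         * kvm_mmu_notifier_invalidate_range_start contains all addresses that
         * might be being invalidated. Note that it may include some false
         * positives, due to shortcuts when handing concurrent invalidations.
         */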
        if (unlikely(kvm->mmu_notifier_count) &&
            hva >= kvm->mmu_notifier_range_start &&
            hva < kvm->mmu_notifier_range_end)
                return 1;
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to
                 * kvm_check_request's caller.  Paired with the smp_wmb
                 * in kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        const struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
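
        /*
         * create is called holding kvm->lock and any operations not suitable
         * to do while holding the lock should be deferred to init (see
         * below).
         */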
        int (*create)(struct kvm_device *dev, u32 type);
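
        /*
         * init is called after create if create is successful and is called
         * outside of holding kvm->lock.
         */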
        void (*init)(struct kvm_device *dev);
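
        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */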
        void (*destroy)(struct kvm_device *dev);
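
        /*
         * Release is an alternative method to free the device. It is
         * called when the device file descriptor is closed. Once
         * release is called, the destroy method will not be called
         * anymore as the device is removed from the device list of
         * the VM. kvm->lock is held.
         */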
        void (*release)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
                !(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                                     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                                      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
#endif

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
        vcpu->run->exit_reason = KVM_EXIT_INTR;
        vcpu->stat.signal_exits++;
}
#endif
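
/*
 * This defines how many reserved entries we want to keep before we
 * kick the vcpu to the userspace to avoid dirty ring full.  This
 * value can be tuned to higher if e.g. PML is enabled on the host.
 */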
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

#endif
1699