#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16..31 of kvm_memory_region::flags are internal to KVM and are
 * not visible to userspace; the userspace-visible flags live in bits
 * 0..15 and are defined in uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag":
 * it is temporarily set while a memslot update is in flight so that readers
 * which cache a generation can detect that cached data (such as MMIO
 * translations) may be stale and needs to be revalidated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For the normal pfn, the highest 12 bits should be zero,
 * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
 * mask bit 63 to indicate the noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn is in slot but failed to
 * translate it to pfn.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn can not be
 * translated to pfn - it is not in slot or failed to
 * translate it to pfn.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfns indicate that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
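
/*
 * Example (illustrative sketch, not part of this header): a typical
 * caller of gfn_to_pfn() distinguishes the possible outcomes with the
 * predicates above; 'gfn' is a placeholder supplied by the caller.
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return -EFAULT;		no memslot backs this gfn
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;		slot exists but translation failed
 *	...use pfn...
 */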

/*
 * An "error hva" marks a gfn whose translation to a host virtual address
 * failed.  Architectures may override these defaults; by default any
 * address at or above PAGE_OFFSET is treated as invalid.
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)

/*
 * Architecture-independent vcpu->requests bit members
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD	(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK		2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)
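
/*
 * Example (illustrative, hypothetical request names): an architecture
 * defines its own requests on top of KVM_REQUEST_ARCH_BASE, optionally
 * OR-ing in the wakeup/wait behaviour flags, e.g. in its asm/kvm_host.h:
 *
 *	#define KVM_REQ_EXAMPLE_EVENT	KVM_ARCH_REQ(0)
 *	#define KVM_REQ_EXAMPLE_FLUSH \
 *		KVM_ARCH_REQ_FLAGS(1, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 */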

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	pte_t pte;
	bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
	 * a 'struct page' for it).  When using the mem= kernel parameter some
	 * memory can be used as guest memory without being managed by the
	 * host kernel.
	 * If 'pfn' is not managed by the host kernel, this field is
	 * initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}
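
/*
 * Example (illustrative sketch, not the actual implementation): halt
 * polling spins until the poll window closes or polling stops being
 * profitable; vcpu_ready_to_run() is a made-up stand-in for the real
 * readiness check.
 *
 *	ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 *
 *	do {
 *		if (vcpu_ready_to_run(vcpu))
 *			return;
 *	} while (kvm_vcpu_can_poll(ktime_get(), stop));
 */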

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int vcpu_idx;
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	struct rcuwait wait;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_dirty_ring dirty_ring;
};

/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so its safe to assume that it's the
	 * stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();

	/*
	 * KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode. In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view. In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice). Lets treat guest mode as quiescent state, just like
	 * we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	context_tracking_guest_exit();

	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}
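
/*
 * Example (illustrative sketch of the expected pairing, not a complete
 * vcpu_run implementation): an architecture's run loop brackets the
 * low-level guest entry with these helpers while interrupts are
 * disabled; enter_guest_asm() is a made-up stand-in for the real entry.
 *
 *	local_irq_disable();
 *	guest_enter_irqoff();
 *	enter_guest_asm(vcpu);
 *	guest_exit_irqoff();
 *	local_irq_enable();
 */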

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support too long bitmaps.
 * This number must be determined not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

/*
 * The dirty bitmap is allocated at twice kvm_dirty_bitmap_bytes(); the
 * second half serves as a scratch buffer when fetching the dirty log.
 */
static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS	KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
	struct kvm_memory_slot memslots[];
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif

	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
	unsigned long mmu_notifier_range_start;
	unsigned long mmu_notifier_range_end;
#endif
	long tlbs_dirty;
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);
	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu.  */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
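
/*
 * Example (illustrative sketch): iterating all online vCPUs, e.g. to
 * kick each one out of guest mode; 'i' and 'vcpu' are locals of the
 * enclosing (hypothetical) function.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */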

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots) \
	for (memslot = &slots->memslots[0]; \
	     memslot < slots->memslots + slots->used_slots; memslot++) \
		if (WARN_ON_ONCE(!memslot->npages)) { \
		} else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	if (index < 0)
		return NULL;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v) \
({ \
	unsigned long __addr = gfn_to_hva(kvm, gfn); \
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
	int __ret = -EFAULT; \
 \
	if (!kvm_is_error_hva(__addr)) \
		__ret = get_user(v, __uaddr); \
	__ret; \
})

#define kvm_get_guest(kvm, gpa, v) \
({ \
	gpa_t __gpa = gpa; \
	struct kvm *__kvm = kvm; \
 \
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
			offset_in_page(__gpa), v); \
})

#define __kvm_put_guest(kvm, gfn, offset, v) \
({ \
	unsigned long __addr = gfn_to_hva(kvm, gfn); \
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
	int __ret = -EFAULT; \
 \
	if (!kvm_is_error_hva(__addr)) \
		__ret = put_user(v, __uaddr); \
	if (!__ret) \
		mark_page_dirty(kvm, gfn); \
	__ret; \
})

#define kvm_put_guest(kvm, gpa, v) \
({ \
	gpa_t __gpa = gpa; \
	struct kvm *__kvm = kvm; \
 \
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
			offset_in_page(__gpa), v); \
})
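
/*
 * Example (illustrative sketch): reading and updating a u32 that the
 * guest keeps at a known gpa; 'gpa' is a placeholder supplied by the
 * caller, and a single access must not cross a page boundary.
 *
 *	u32 val;
 *
 *	if (kvm_get_guest(kvm, gpa, val))
 *		return -EFAULT;
 *	val++;
 *	if (kvm_put_guest(kvm, gpa, val))
 *		return -EFAULT;
 */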

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
		struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot);
#else
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQ injections
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (unlikely(!slots->used_slots))
		return NULL;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
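
/*
 * Example (illustrative sketch): translating a gfn to a host virtual
 * address with the inline helpers, with the memslots protected by the
 * caller's SRCU read-side critical section; 'gfn' is a placeholder.
 *
 *	struct kvm_memory_slot *slot;
 *	unsigned long hva;
 *
 *	slot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
 *	if (!slot)
 *		return KVM_HVA_ERR_BAD;
 *	hva = __gfn_to_hva_memslot(slot, gfn);
 */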

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To avoid
	 * that a malicious guest builds a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's pages.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	int mode;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item) \
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)

#define VM_STAT(n, x, ...) \
	{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
#define VCPU_STAT(n, x, ...) \
	{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }

extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
					 unsigned long mmu_seq,
					 unsigned long hva)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_notifier_count is non-zero, then the range maintained by
	 * kvm_mmu_notifier_invalidate_range_start contains all addresses
	 * that might be being invalidated.  Note that it may include some
	 * false positives, due to shortcuts when handling concurrent
	 * invalidations.
	 */
	if (unlikely(kvm->mmu_notifier_count) &&
	    hva >= kvm->mmu_notifier_range_start &&
	    hva < kvm->mmu_notifier_range_end)
		return 1;
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
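
/*
 * Example (illustrative sketch of the retry protocol): a page fault
 * handler snapshots mmu_notifier_seq before resolving the fault and
 * rechecks it under mmu_lock before installing the mapping; if an
 * invalidation ran in between, the stale pfn is dropped and the fault
 * is retried.
 *
 *	unsigned long mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	may sleep, no locks held
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock_and_retry;
 *	...install the mapping for pfn...
 */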

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller.  Paired with the smp_wmb in
		 * kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
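
/*
 * Example (illustrative sketch): the producer/consumer pattern for
 * requests.  One side posts a request and kicks the vCPU; the vCPU's
 * run loop consumes pending requests before (re)entering the guest.
 * KVM_REQ_UNBLOCK is one of the generic requests defined above.
 *
 *	producer:
 *		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
 *		kvm_vcpu_kick(vcpu);
 *
 *	vcpu run loop:
 *		if (kvm_request_pending(vcpu)) {
 *			if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
 *				...handle it...
 *		}
 */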

extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock and any operations not suitable
	 * to do while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device. It is
	 * called when the device file descriptor is closed. Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM. kvm->lock is held.
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
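
/*
 * Example (illustrative sketch, hypothetical device): registering a
 * device type so userspace can instantiate it via KVM_CREATE_DEVICE.
 * 'example_create', 'example_destroy' and KVM_DEV_TYPE_EXAMPLE are
 * made-up names; real device types are defined in uapi/linux/kvm.h.
 *
 *	static struct kvm_device_ops example_ops = {
 *		.name = "example",
 *		.create = example_create,
 *		.destroy = example_destroy,
 *	};
 *
 *	kvm_register_device_ops(&example_ops, KVM_DEV_TYPE_EXAMPLE);
 */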

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wakeup during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
					     unsigned int ioctl,
					     unsigned long arg)
{
	return -ENOIOCTLCMD;
}
#endif

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
					    unsigned long start, unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
				uintptr_t data, const char *name,
				struct task_struct **thread_ptr);

#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}
#endif

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vcpu to the userspace to avoid dirty ring full.  This
 * value can be tuned to higher if e.g. PML is enabled on the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES  64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES  65536

#endif