linux/include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_NMI               13

#define KVM_USERSPACE_IRQ_SOURCE_ID     0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
        int                   dev_count;
#define NR_IOBUS_DEVS 200
        struct kvm_io_device *devs[NR_IOBUS_DEVS];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                            struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
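
/*
 * An illustrative sketch, not part of this header (the real code lives
 * in virt/kvm/kvm_main.c): with the linear layout above, a bus write is
 * dispatched by walking devs[] until a device claims the address,
 * roughly (kvm_iodevice_write() is assumed from virt/kvm/iodev.h):
 *
 *	for (i = 0; i < bus->dev_count; i++)
 *		if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
 *			return 0;	(a device handled the access)
 *	return -EOPNOTSUPP;	(no device claimed it)
 */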

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};
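
/*
 * Rough life cycle (see virt/kvm/async_pf.c): kvm_setup_async_pf()
 * queues an entry on vcpu->async_pf.queue and hands its work_struct to
 * a workqueue, which faults the page in from the vcpu's mm; completed
 * entries are moved to vcpu->async_pf.done and reaped by
 * kvm_check_async_pf_completion() on the next iteration of the vcpu
 * loop.
 */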

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};

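/*
 * Atomically switch vcpu->mode from IN_GUEST_MODE to EXITING_GUEST_MODE
 * and return the old mode.  A typical kicker sends an IPI only when the
 * old mode was IN_GUEST_MODE; a vcpu that is already outside (or
 * exiting) guest mode will notice vcpu->requests by itself.
 */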
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_lpage_info {
        unsigned long rmap_pde;
        int write_count;
};

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

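/*
 * Bytes needed for a memslot's dirty bitmap: one bit per page, rounded
 * up to a whole number of unsigned longs (hence the ALIGN to
 * BITS_PER_LONG before dividing by 8 bits per byte).
 */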
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi.  Each entry contains a list of the irq
         * chips the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

struct kvm_memslots {
        int nmemslots;
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
                                        KVM_PRIVATE_MEM_SLOTS];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
        struct kvm_vcpu *bsp_vcpu;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)                                       \
 do {                                                                   \
        if (printk_ratelimit())                                         \
                printk(KERN_ERR "kvm: %i: cpu%i " fmt,                  \
                       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

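/*
 * The smp_rmb() below pairs with the write barrier in the vcpu creation
 * path, which publishes the vcpus[] entry before incrementing
 * online_vcpus; readers check online_vcpus (as kvm_for_each_vcpu does)
 * before dereferencing vcpus[i].
 */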
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \
             idx < atomic_read(&kvm->online_vcpus) && vcpup; \
             vcpup = kvm_get_vcpu(kvm, ++idx))

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

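/*
 * Callers must be inside a kvm->srcu read-side critical section or hold
 * kvm->slots_lock; the rcu_dereference_check() below asserts exactly
 * that before handing out the memslots pointer.
 */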
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

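/*
 * Error host physical addresses are encoded with the most significant
 * bit of hpa_t set (e.g. any value >= 1ULL << 63 for a 64-bit hpa_t);
 * is_error_hpa() simply tests that bit.
 */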
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern pfn_t bad_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
                                int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
int memslot_id(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        char irq_name[32];
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

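/*
 * PF_VCPU brackets guest execution so that the time accounting code
 * attributes CPU time spent between these two calls to guest time
 * rather than ordinary system time.
 */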
static inline void kvm_guest_enter(void)
{
        account_system_vtime(current);
        current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}

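/*
 * Address-space conversion helpers.  A gfn (guest frame number) is a
 * guest physical address shifted right by PAGE_SHIFT; with 4 KiB pages,
 * gpa 0x12345678 belongs to gfn 0x12345, which maps to the host virtual
 * address slot->userspace_addr + (0x12345 - slot->base_gfn) * PAGE_SIZE.
 */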
static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
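/*
 * Typical caller pattern, sketched from the x86 page fault path
 * (details vary by architecture):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep, mmu_lock not held)
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		bail out and retry the fault;
 *
 * The sequence count thus catches invalidations that raced with the
 * pfn lookup while mmu_lock was dropped.
 */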
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under mmu_lock and both values are only
         * modified under mmu_lock, so there's no need for an smp_rmb()
         * in between; otherwise mmu_notifier_count would have to be
         * read before mmu_notifier_seq, see the
         * mmu_notifier_invalidate_range_end() write side.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

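/*
 * Accessors for the KVM_REQ_* bits defined at the top of this file:
 * anyone may post a request with kvm_make_request() (usually followed
 * by a kick), and the vcpu loop consumes it with kvm_check_request(),
 * which clears the bit before returning true.
 */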
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}

#endif