linux/include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * If we support unaligned MMIO, at most one fragment will be split into two:
 */
#ifdef KVM_UNALIGNED_MMIO
#  define KVM_EXTRA_MMIO_FRAGMENTS 1
#else
#  define KVM_EXTRA_MMIO_FRAGMENTS 0
#endif

#define KVM_USER_MMIO_SIZE 8

#define KVM_MAX_MMIO_FRAGMENTS \
        (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)

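/*
 * Worked example (editorial addition, not part of the original header): if an
 * architecture defines KVM_MMIO_SIZE as 16 and leaves KVM_UNALIGNED_MMIO
 * undefined, KVM_MAX_MMIO_FRAGMENTS evaluates to 16 / 8 + 0 == 2.
 */
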
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17

#define KVM_USERSPACE_IRQ_SOURCE_ID     0

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int                   dev_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

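/*
 * Illustrative example (editorial addition): an 8-byte MMIO write to gpa
 * 0x1ffc crosses a 4K page boundary, so it would be split into two fragments,
 * e.g. { .gpa = 0x1ffc, .len = 4 } and { .gpa = 0x2000, .len = 4 }, each
 * delivered to userspace as a separate KVM_EXIT_MMIO.
 */
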
struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support arbitrarily long bitmaps, so
 * this limit must be chosen so that it never exceeds what they can handle.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

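/*
 * Worked example (editorial addition): on a 64-bit host (BITS_PER_LONG == 64)
 * a slot of 1000 pages rounds up to 1024 bits, i.e.
 * ALIGN(1000, 64) / 8 == 128 bytes of dirty bitmap.
 */
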
struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains a list of the irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)                                     \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

#define kvm_for_each_memslot(memslot, slots)    \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
                memslot++)

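/*
 * Illustrative usage sketch (editorial addition; example_kick_all() is a
 * hypothetical caller): iterate over every online vcpu and kick each one.
 *
 *	static void example_kick_all(struct kvm *kvm)
 *	{
 *		int idx;
 *		struct kvm_vcpu *vcpu;
 *
 *		kvm_for_each_vcpu(idx, vcpu, kvm)
 *			kvm_vcpu_kick(vcpu);
 *	}
 *
 * kvm_for_each_memslot() is used the same way to walk every populated slot
 * in a struct kvm_memslots.
 */
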
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

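/*
 * Illustrative usage sketch (editorial addition; example_slot_pages() is
 * hypothetical): memslots are protected by kvm->srcu, so readers take an
 * SRCU read lock around kvm_memslots()/id_to_memslot().
 *
 *	static unsigned long example_slot_pages(struct kvm *kvm, int slot_id)
 *	{
 *		int idx = srcu_read_lock(&kvm->srcu);
 *		struct kvm_memory_slot *slot;
 *		unsigned long npages;
 *
 *		slot = id_to_memslot(kvm_memslots(kvm), slot_id);
 *		npages = slot->npages;
 *		srcu_read_unlock(&kvm->srcu, idx);
 *		return npages;
 *	}
 */
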
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }

extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
                                int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        BUG_ON(preemptible());
        account_system_vtime(current);
        current->flags |= PF_VCPU;
        /*
         * KVM does not hold any references to RCU-protected data when it
         * switches the CPU into guest mode. In fact, switching to guest mode
         * is very similar to exiting to userspace from an RCU point of view.
         * In addition, the CPU may stay in guest mode for quite a long time
         * (up to one time slice). Let's treat guest mode as a quiescent
         * state, just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}

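/*
 * Illustrative usage sketch (editorial addition): arch vcpu run loops
 * typically bracket the actual guest entry roughly as follows; the exact
 * ordering of the irq-enable and kvm_guest_exit() calls varies by
 * architecture.
 *
 *	preempt_disable();
 *	local_irq_disable();
 *	kvm_guest_enter();
 *	... switch to the guest ...
 *	kvm_guest_exit();
 *	local_irq_enable();
 *	preempt_enable();
 */
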
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                      gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

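/*
 * Worked example (editorial addition): on x86, KVM_HPAGE_GFN_SHIFT() is 0
 * for 4K pages and 9 for 2M pages, so for a slot with base_gfn == 0x1000,
 * gfn_to_index(0x1234, 0x1000, 2) == (0x1234 >> 9) - (0x1000 >> 9)
 * == 9 - 8 == 1.
 */
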
static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}

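/*
 * Worked example (editorial addition): with 4K pages (PAGE_SHIFT == 12),
 * gfn_to_gpa(0x12345) == 0x12345000 and gpa_to_gfn(0x12345678) == 0x12345.
 */
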
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * it cannot rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif

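/*
 * Illustrative usage sketch (editorial addition): the fault-handling pattern
 * that pairs with mmu_notifier_retry() looks roughly like the following; a
 * stale pfn is dropped and the fault retried if a notifier ran in between.
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	// release pfn and retry the fault
 *	... install the mapping ...
 *	spin_unlock(&vcpu->kvm->mmu_lock);
 */
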
#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}

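/*
 * Illustrative usage sketch (editorial addition): a producer raises a request
 * and kicks the vcpu, and the arch run loop consumes it, e.g.
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * and, in the vcpu run loop:
 *
 *	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *		... flush the guest TLB ...
 */
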
#endif