linux/include/linux/kvm_host.h
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/* Two fragments for an MMIO access that crosses a page boundary. */
#define KVM_MAX_MMIO_FRAGMENTS  2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 52-62 to indicate an error pfn and
 * mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the gfn failed to
 * be translated to a pfn on the host.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn can not be translated to a
 * pfn - either it is not in any slot, or the translation to a pfn failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
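
/*
 * Editor's illustration (not part of the original header): a minimal
 * sketch of how a caller might fold the error/noslot pfn encodings
 * above into an errno. The helper name is hypothetical.
 */
static inline int kvm_example_pfn_to_errno(pfn_t pfn)
{
        if (is_noslot_pfn(pfn))
                return -ENOENT; /* gfn is not backed by any memslot */
        if (is_error_pfn(pfn))
                return -EFAULT; /* in a slot, but translation failed */
        return 0;               /* valid pfn */
}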

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}
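
/*
 * Usage sketch (editor's illustration): the hva and page lookup paths
 * both signal failure in-band, so callers test the returned value
 * itself, e.g.:
 *
 *      unsigned long hva = gfn_to_hva(kvm, gfn);
 *      if (kvm_is_error_hva(hva))
 *              return -EFAULT;
 *
 *      struct page *page = gfn_to_page(kvm, gfn);
 *      if (is_error_page(page))
 *              return -EFAULT;
 *      ...
 *      kvm_release_page_clean(page);
 */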

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17
#define KVM_REQ_WATCHDOG          18
#define KVM_REQ_MASTERCLOCK_UPDATE 19
#define KVM_REQ_MCLOCK_INPROGRESS 20

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int                   dev_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};
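
/*
 * Worked example (editor's illustration): with 4 KiB pages, an 8-byte
 * mmio write to gpa 0xffc straddles a page boundary and is split into
 * two fragments, { gpa = 0xffc, len = 4 } and { gpa = 0x1000, len = 4 },
 * each serviced by its own KVM_EXIT_MMIO round trip to userspace.
 */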

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * Cpu-relax-intercept / pause-loop-exit optimization:
         * in_spin_loop: set when a vcpu takes a pause-loop exit or has
         *  a cpu-relax intercepted.
         * dy_eligible: indicates whether the vcpu is eligible for directed yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support very long bitmaps;
 * this number must be chosen so as not to exceed those limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        int user_alloc;
        int id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
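
/*
 * Worked example (editor's illustration): on a 64-bit host
 * (BITS_PER_LONG == 64), a slot with npages == 1000 is rounded up to
 * ALIGN(1000, 64) == 1024 bits, i.e. 128 bytes of dirty bitmap.
 */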

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains a list of the irq
         * chips the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        int id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
                struct list_head  resampler_list;
                struct mutex      resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)                                     \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        /*
         * Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(), so
         * that a reader who saw online_vcpus also sees the vcpu pointer.
         */
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
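
/*
 * Usage sketch (editor's illustration): raise a request on every online
 * vcpu, e.g.:
 *
 *      struct kvm_vcpu *vcpu;
 *      int i;
 *
 *      kvm_for_each_vcpu(i, vcpu, kvm)
 *              kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 */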

#define kvm_for_each_memslot(memslot, slots)    \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
                memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}
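
/*
 * Usage sketch (editor's illustration): readers access the memslots
 * under SRCU, which is what the rcu_dereference_check() above verifies:
 *
 *      int idx = srcu_read_lock(&kvm->srcu);
 *      struct kvm_memslots *slots = kvm_memslots(kvm);
 *      struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 *      ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */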

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}
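
/*
 * Usage sketch (editor's illustration): since memslots[] is no longer
 * ordered by id (see the note above struct kvm_memslots), code that is
 * handed a slot id goes through the id_to_index table, e.g.:
 *
 *      struct kvm_memory_slot *memslot =
 *              id_to_memslot(kvm_memslots(kvm), log->slot);
 */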

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
                                int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
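
/*
 * Usage sketch (editor's illustration): the gfn_to_hva_cache variants
 * amortize the gfn->hva translation for a guest page that is accessed
 * repeatedly (e.g. a shared clock or steal-time record):
 *
 *      struct gfn_to_hva_cache ghc;
 *
 *      kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);             (once)
 *      kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));  (hot path)
 */
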
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct
                                   kvm_userspace_memory_region *mem,
                                   int user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */

static inline void kvm_guest_enter(void)
{
        BUG_ON(preemptible());
        /*
         * This is running in ioctl context so we can avoid
         * the call to vtime_account() with its unnecessary idle check.
         */
        vtime_account_system_irqsafe(current);
        current->flags |= PF_VCPU;
        /* KVM does not hold any references to rcu protected data when it
         * switches the CPU into guest mode. In fact switching to guest mode
         * is very similar to exiting to userspace from the rcu point of view.
         * In addition, the CPU may stay in guest mode for quite a long time
         * (up to one time slice). Let's treat guest mode as a quiescent
         * state, just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        /*
         * This is running in ioctl context so we can avoid
         * the call to vtime_account() with its unnecessary idle check.
         */
        vtime_account_system_irqsafe(current);
        current->flags &= ~PF_VCPU;
}
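
/*
 * Usage sketch (editor's illustration): an arch's vcpu run loop brackets
 * the actual guest entry with these helpers, roughly:
 *
 *      preempt_disable();
 *      kvm_guest_enter();
 *      ... run the guest until the next vmexit ...
 *      kvm_guest_exit();
 *      preempt_enable();
 */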

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                      gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
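
/*
 * Worked example (editor's illustration): on x86, where
 * KVM_HPAGE_GFN_SHIFT(level) is 9 * (level - 1), a slot with
 * base_gfn == 0x400 gives gfn_to_index(0x603, 0x400, 2) ==
 * (0x603 >> 9) - (0x400 >> 9) == 3 - 2 == 1, i.e. the second
 * 2 MiB page covered by the slot.
 */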

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
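
/*
 * Worked example (editor's illustration): with 4 KiB pages
 * (PAGE_SHIFT == 12), gpa 0x12345678 lies in gfn 0x12345, and
 * gfn_to_gpa(0x12345) recovers the page-aligned gpa 0x12345000;
 * the low 12 bits are the offset within the page.
 */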

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock
         * rather than under kvm->mmu_lock, for scalability, so
         * can't rely on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
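
/*
 * Usage sketch (editor's illustration): a page fault handler snapshots
 * the notifier sequence before translating the gfn and revalidates it
 * under mmu_lock before installing the pfn, e.g.:
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(kvm, mmu_seq))
 *              goto out_unlock;        (drop the pfn, retry the fault)
 *      ... install the mapping ...
 *      spin_unlock(&kvm->mmu_lock);
 */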
#endif

#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
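
/*
 * Usage sketch (editor's illustration): requests are raised on a vcpu
 * (typically followed by a kick to force a guest exit) and consumed at
 * the top of the vcpu's run loop, e.g.:
 *
 *      raising side:
 *              kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *              kvm_vcpu_kick(vcpu);
 *
 *      run loop:
 *              if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *                      ... flush this vcpu's TLB ...
 */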

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif
