linux/drivers/vhost/vhost.c
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/socket.h> /* memcpy_fromiovec */
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/rcupdate.h>
  22#include <linux/poll.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/slab.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28#include <linux/module.h>
  29
  30#include "vhost.h"
  31
  32enum {
  33        VHOST_MEMORY_MAX_NREGIONS = 64,
  34        VHOST_MEMORY_F_LOG = 0x1,
  35};
  36
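/* With VIRTIO_RING_F_EVENT_IDX negotiated, the slot just past each ring is
 * reused as an event index: the used event sits after the avail ring and the
 * avail event after the used ring, which is what these helpers address. */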
  37#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
  38#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
  39
  40static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  41                            poll_table *pt)
  42{
  43        struct vhost_poll *poll;
  44
  45        poll = container_of(pt, struct vhost_poll, table);
  46        poll->wqh = wqh;
  47        add_wait_queue(wqh, &poll->wait);
  48}
  49
  50static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  51                             void *key)
  52{
  53        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
  54
  55        if (!((unsigned long)key & poll->mask))
  56                return 0;
  57
  58        vhost_poll_queue(poll);
  59        return 0;
  60}
  61
  62void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
  63{
  64        INIT_LIST_HEAD(&work->node);
  65        work->fn = fn;
  66        init_waitqueue_head(&work->done);
  67        work->flushing = 0;
  68        work->queue_seq = work->done_seq = 0;
  69}
  70EXPORT_SYMBOL_GPL(vhost_work_init);
  71
  72/* Init poll structure */
  73void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  74                     unsigned long mask, struct vhost_dev *dev)
  75{
  76        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  77        init_poll_funcptr(&poll->table, vhost_poll_func);
  78        poll->mask = mask;
  79        poll->dev = dev;
  80        poll->wqh = NULL;
  81
  82        vhost_work_init(&poll->work, fn);
  83}
  84EXPORT_SYMBOL_GPL(vhost_poll_init);
  85
   86/* Start polling a file. We add ourselves to the file's wait queue. The caller
   87 * must keep a reference to the file until after vhost_poll_stop is called. */
  88int vhost_poll_start(struct vhost_poll *poll, struct file *file)
  89{
  90        unsigned long mask;
  91        int ret = 0;
  92
  93        if (poll->wqh)
  94                return 0;
  95
  96        mask = file->f_op->poll(file, &poll->table);
  97        if (mask)
  98                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  99        if (mask & POLLERR) {
 100                if (poll->wqh)
 101                        remove_wait_queue(poll->wqh, &poll->wait);
 102                ret = -EINVAL;
 103        }
 104
 105        return ret;
 106}
 107EXPORT_SYMBOL_GPL(vhost_poll_start);
 108
 109/* Stop polling a file. After this function returns, it becomes safe to drop the
 110 * file reference. You must also flush afterwards. */
 111void vhost_poll_stop(struct vhost_poll *poll)
 112{
 113        if (poll->wqh) {
 114                remove_wait_queue(poll->wqh, &poll->wait);
 115                poll->wqh = NULL;
 116        }
 117}
 118EXPORT_SYMBOL_GPL(vhost_poll_stop);
 119
 120static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 121                                unsigned seq)
 122{
 123        int left;
 124
 125        spin_lock_irq(&dev->work_lock);
 126        left = seq - work->done_seq;
 127        spin_unlock_irq(&dev->work_lock);
 128        return left <= 0;
 129}
 130
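/* Wait until any instance of this work item queued before the call has
 * finished running: flushers snapshot queue_seq and sleep until the worker's
 * done_seq catches up. */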
 131void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 132{
 133        unsigned seq;
 134        int flushing;
 135
 136        spin_lock_irq(&dev->work_lock);
 137        seq = work->queue_seq;
 138        work->flushing++;
 139        spin_unlock_irq(&dev->work_lock);
 140        wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 141        spin_lock_irq(&dev->work_lock);
 142        flushing = --work->flushing;
 143        spin_unlock_irq(&dev->work_lock);
 144        BUG_ON(flushing < 0);
 145}
 146EXPORT_SYMBOL_GPL(vhost_work_flush);
 147
 148/* Flush any work that has been scheduled. When calling this, don't hold any
 149 * locks that are also used by the callback. */
 150void vhost_poll_flush(struct vhost_poll *poll)
 151{
 152        vhost_work_flush(poll->dev, &poll->work);
 153}
 154EXPORT_SYMBOL_GPL(vhost_poll_flush);
 155
 156void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 157{
 158        unsigned long flags;
 159
 160        spin_lock_irqsave(&dev->work_lock, flags);
 161        if (list_empty(&work->node)) {
 162                list_add_tail(&work->node, &dev->work_list);
 163                work->queue_seq++;
 164                wake_up_process(dev->worker);
 165        }
 166        spin_unlock_irqrestore(&dev->work_lock, flags);
 167}
 168EXPORT_SYMBOL_GPL(vhost_work_queue);
 169
 170void vhost_poll_queue(struct vhost_poll *poll)
 171{
 172        vhost_work_queue(poll->dev, &poll->work);
 173}
 174EXPORT_SYMBOL_GPL(vhost_poll_queue);
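/* Typical lifecycle of the poll helpers above, as a rough illustration
 * (handle_rx, sock_file and the enclosing backend are hypothetical, not part
 * of this file):
 *
 *	vhost_poll_init(&poll, handle_rx, POLLIN, &dev);  // bind work fn + mask
 *	vhost_poll_start(&poll, sock_file);               // watch the file
 *	...
 *	vhost_poll_stop(&poll);                           // drop off wait queue
 *	vhost_poll_flush(&poll);                          // wait for queued work
 */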
 175
 176static void vhost_vq_reset(struct vhost_dev *dev,
 177                           struct vhost_virtqueue *vq)
 178{
 179        vq->num = 1;
 180        vq->desc = NULL;
 181        vq->avail = NULL;
 182        vq->used = NULL;
 183        vq->last_avail_idx = 0;
 184        vq->avail_idx = 0;
 185        vq->last_used_idx = 0;
 186        vq->signalled_used = 0;
 187        vq->signalled_used_valid = false;
 188        vq->used_flags = 0;
 189        vq->log_used = false;
 190        vq->log_addr = -1ull;
 191        vq->private_data = NULL;
 192        vq->log_base = NULL;
 193        vq->error_ctx = NULL;
 194        vq->error = NULL;
 195        vq->kick = NULL;
 196        vq->call_ctx = NULL;
 197        vq->call = NULL;
 198        vq->log_ctx = NULL;
 199}
 200
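/* Per-device worker thread: runs queued vhost_work items one at a time in the
 * owner's address space and records done_seq so flushers can make progress. */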
 201static int vhost_worker(void *data)
 202{
 203        struct vhost_dev *dev = data;
 204        struct vhost_work *work = NULL;
 205        unsigned uninitialized_var(seq);
 206        mm_segment_t oldfs = get_fs();
 207
 208        set_fs(USER_DS);
 209        use_mm(dev->mm);
 210
 211        for (;;) {
 212                /* mb paired w/ kthread_stop */
 213                set_current_state(TASK_INTERRUPTIBLE);
 214
 215                spin_lock_irq(&dev->work_lock);
 216                if (work) {
 217                        work->done_seq = seq;
 218                        if (work->flushing)
 219                                wake_up_all(&work->done);
 220                }
 221
 222                if (kthread_should_stop()) {
 223                        spin_unlock_irq(&dev->work_lock);
 224                        __set_current_state(TASK_RUNNING);
 225                        break;
 226                }
 227                if (!list_empty(&dev->work_list)) {
 228                        work = list_first_entry(&dev->work_list,
 229                                                struct vhost_work, node);
 230                        list_del_init(&work->node);
 231                        seq = work->queue_seq;
 232                } else
 233                        work = NULL;
 234                spin_unlock_irq(&dev->work_lock);
 235
 236                if (work) {
 237                        __set_current_state(TASK_RUNNING);
 238                        work->fn(work);
 239                        if (need_resched())
 240                                schedule();
 241                } else
 242                        schedule();
 243
 244        }
 245        unuse_mm(dev->mm);
 246        set_fs(oldfs);
 247        return 0;
 248}
 249
 250static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 251{
 252        kfree(vq->indirect);
 253        vq->indirect = NULL;
 254        kfree(vq->log);
 255        vq->log = NULL;
 256        kfree(vq->heads);
 257        vq->heads = NULL;
 258}
 259
 260/* Helper to allocate iovec buffers for all vqs. */
 261static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 262{
 263        struct vhost_virtqueue *vq;
 264        int i;
 265
 266        for (i = 0; i < dev->nvqs; ++i) {
 267                vq = dev->vqs[i];
 268                vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
 269                                       GFP_KERNEL);
 270                vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
 271                vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
 272                if (!vq->indirect || !vq->log || !vq->heads)
 273                        goto err_nomem;
 274        }
 275        return 0;
 276
 277err_nomem:
 278        for (; i >= 0; --i)
 279                vhost_vq_free_iovecs(dev->vqs[i]);
 280        return -ENOMEM;
 281}
 282
 283static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 284{
 285        int i;
 286
 287        for (i = 0; i < dev->nvqs; ++i)
 288                vhost_vq_free_iovecs(dev->vqs[i]);
 289}
 290
 291long vhost_dev_init(struct vhost_dev *dev,
 292                    struct vhost_virtqueue **vqs, int nvqs)
 293{
 294        struct vhost_virtqueue *vq;
 295        int i;
 296
 297        dev->vqs = vqs;
 298        dev->nvqs = nvqs;
 299        mutex_init(&dev->mutex);
 300        dev->log_ctx = NULL;
 301        dev->log_file = NULL;
 302        dev->memory = NULL;
 303        dev->mm = NULL;
 304        spin_lock_init(&dev->work_lock);
 305        INIT_LIST_HEAD(&dev->work_list);
 306        dev->worker = NULL;
 307
 308        for (i = 0; i < dev->nvqs; ++i) {
 309                vq = dev->vqs[i];
 310                vq->log = NULL;
 311                vq->indirect = NULL;
 312                vq->heads = NULL;
 313                vq->dev = dev;
 314                mutex_init(&vq->mutex);
 315                vhost_vq_reset(dev, vq);
 316                if (vq->handle_kick)
 317                        vhost_poll_init(&vq->poll, vq->handle_kick,
 318                                        POLLIN, dev);
 319        }
 320
 321        return 0;
 322}
 323EXPORT_SYMBOL_GPL(vhost_dev_init);
 324
 325/* Caller should have device mutex */
 326long vhost_dev_check_owner(struct vhost_dev *dev)
 327{
 328        /* Are you the owner? If not, I don't think you mean to do that */
 329        return dev->mm == current->mm ? 0 : -EPERM;
 330}
 331EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 332
 333struct vhost_attach_cgroups_struct {
 334        struct vhost_work work;
 335        struct task_struct *owner;
 336        int ret;
 337};
 338
 339static void vhost_attach_cgroups_work(struct vhost_work *work)
 340{
 341        struct vhost_attach_cgroups_struct *s;
 342
 343        s = container_of(work, struct vhost_attach_cgroups_struct, work);
 344        s->ret = cgroup_attach_task_all(s->owner, current);
 345}
 346
 347static int vhost_attach_cgroups(struct vhost_dev *dev)
 348{
 349        struct vhost_attach_cgroups_struct attach;
 350
 351        attach.owner = current;
 352        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 353        vhost_work_queue(dev, &attach.work);
 354        vhost_work_flush(dev, &attach.work);
 355        return attach.ret;
 356}
 357
 358/* Caller should have device mutex */
 359bool vhost_dev_has_owner(struct vhost_dev *dev)
 360{
 361        return dev->mm;
 362}
 363EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 364
 365/* Caller should have device mutex */
 366long vhost_dev_set_owner(struct vhost_dev *dev)
 367{
 368        struct task_struct *worker;
 369        int err;
 370
 371        /* Is there an owner already? */
 372        if (vhost_dev_has_owner(dev)) {
 373                err = -EBUSY;
 374                goto err_mm;
 375        }
 376
 377        /* No owner, become one */
 378        dev->mm = get_task_mm(current);
 379        worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 380        if (IS_ERR(worker)) {
 381                err = PTR_ERR(worker);
 382                goto err_worker;
 383        }
 384
 385        dev->worker = worker;
 386        wake_up_process(worker);        /* avoid contributing to loadavg */
 387
 388        err = vhost_attach_cgroups(dev);
 389        if (err)
 390                goto err_cgroup;
 391
 392        err = vhost_dev_alloc_iovecs(dev);
 393        if (err)
 394                goto err_cgroup;
 395
 396        return 0;
 397err_cgroup:
 398        kthread_stop(worker);
 399        dev->worker = NULL;
 400err_worker:
 401        if (dev->mm)
 402                mmput(dev->mm);
 403        dev->mm = NULL;
 404err_mm:
 405        return err;
 406}
 407EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 408
 409struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 410{
 411        return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 412}
 413EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 414
 415/* Caller should have device mutex */
 416void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 417{
 418        vhost_dev_cleanup(dev, true);
 419
 420        /* Restore memory to default empty mapping. */
 421        memory->nregions = 0;
 422        RCU_INIT_POINTER(dev->memory, memory);
 423}
 424EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 425
 426void vhost_dev_stop(struct vhost_dev *dev)
 427{
 428        int i;
 429
 430        for (i = 0; i < dev->nvqs; ++i) {
 431                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 432                        vhost_poll_stop(&dev->vqs[i]->poll);
 433                        vhost_poll_flush(&dev->vqs[i]->poll);
 434                }
 435        }
 436}
 437EXPORT_SYMBOL_GPL(vhost_dev_stop);
 438
 439/* Caller should have device mutex if and only if locked is set */
 440void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 441{
 442        int i;
 443
 444        for (i = 0; i < dev->nvqs; ++i) {
 445                if (dev->vqs[i]->error_ctx)
 446                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
 447                if (dev->vqs[i]->error)
 448                        fput(dev->vqs[i]->error);
 449                if (dev->vqs[i]->kick)
 450                        fput(dev->vqs[i]->kick);
 451                if (dev->vqs[i]->call_ctx)
 452                        eventfd_ctx_put(dev->vqs[i]->call_ctx);
 453                if (dev->vqs[i]->call)
 454                        fput(dev->vqs[i]->call);
 455                vhost_vq_reset(dev, dev->vqs[i]);
 456        }
 457        vhost_dev_free_iovecs(dev);
 458        if (dev->log_ctx)
 459                eventfd_ctx_put(dev->log_ctx);
 460        dev->log_ctx = NULL;
 461        if (dev->log_file)
 462                fput(dev->log_file);
 463        dev->log_file = NULL;
 464        /* No one will access memory at this point */
 465        kfree(rcu_dereference_protected(dev->memory,
 466                                        locked ==
 467                                                lockdep_is_held(&dev->mutex)));
 468        RCU_INIT_POINTER(dev->memory, NULL);
 469        WARN_ON(!list_empty(&dev->work_list));
 470        if (dev->worker) {
 471                kthread_stop(dev->worker);
 472                dev->worker = NULL;
 473        }
 474        if (dev->mm)
 475                mmput(dev->mm);
 476        dev->mm = NULL;
 477}
 478EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
 479
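/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE page of
 * guest memory; check that the bitmap bytes covering [addr, addr + sz) are
 * writable without overflowing the pointer arithmetic. */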
 480static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 481{
 482        u64 a = addr / VHOST_PAGE_SIZE / 8;
 483
 484        /* Make sure 64 bit math will not overflow. */
 485        if (a > ULONG_MAX - (unsigned long)log_base ||
 486            a + (unsigned long)log_base > ULONG_MAX)
 487                return 0;
 488
 489        return access_ok(VERIFY_WRITE, log_base + a,
 490                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 491}
 492
 493/* Caller should have vq mutex and device mutex. */
 494static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 495                               int log_all)
 496{
 497        int i;
 498
 499        if (!mem)
 500                return 0;
 501
 502        for (i = 0; i < mem->nregions; ++i) {
 503                struct vhost_memory_region *m = mem->regions + i;
 504                unsigned long a = m->userspace_addr;
 505                if (m->memory_size > ULONG_MAX)
 506                        return 0;
 507                else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 508                                    m->memory_size))
 509                        return 0;
 510                else if (log_all && !log_access_ok(log_base,
 511                                                   m->guest_phys_addr,
 512                                                   m->memory_size))
 513                        return 0;
 514        }
 515        return 1;
 516}
 517
 518/* Can we switch to this memory table? */
 519/* Caller should have device mutex but not vq mutex */
 520static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 521                            int log_all)
 522{
 523        int i;
 524
 525        for (i = 0; i < d->nvqs; ++i) {
 526                int ok;
 527                mutex_lock(&d->vqs[i]->mutex);
 528                /* If ring is inactive, will check when it's enabled. */
 529                if (d->vqs[i]->private_data)
 530                        ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
 531                                                 log_all);
 532                else
 533                        ok = 1;
 534                mutex_unlock(&d->vqs[i]->mutex);
 535                if (!ok)
 536                        return 0;
 537        }
 538        return 1;
 539}
 540
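/* Check that the desc, avail and used rings for a queue of 'num' entries are
 * accessible, including the extra event word when EVENT_IDX is negotiated. */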
 541static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 542                        struct vring_desc __user *desc,
 543                        struct vring_avail __user *avail,
 544                        struct vring_used __user *used)
 545{
 546        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 547        return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 548               access_ok(VERIFY_READ, avail,
 549                         sizeof *avail + num * sizeof *avail->ring + s) &&
 550               access_ok(VERIFY_WRITE, used,
 551                        sizeof *used + num * sizeof *used->ring + s);
 552}
 553
 554/* Can we log writes? */
 555/* Caller should have device mutex but not vq mutex */
 556int vhost_log_access_ok(struct vhost_dev *dev)
 557{
 558        struct vhost_memory *mp;
 559
 560        mp = rcu_dereference_protected(dev->memory,
 561                                       lockdep_is_held(&dev->mutex));
 562        return memory_access_ok(dev, mp, 1);
 563}
 564EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 565
 566/* Verify access for write logging. */
 567/* Caller should have vq mutex and device mutex */
 568static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 569                            void __user *log_base)
 570{
 571        struct vhost_memory *mp;
 572        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 573
 574        mp = rcu_dereference_protected(vq->dev->memory,
 575                                       lockdep_is_held(&vq->mutex));
 576        return vq_memory_access_ok(log_base, mp,
 577                            vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 578                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
 579                                        sizeof *vq->used +
 580                                        vq->num * sizeof *vq->used->ring + s));
 581}
 582
 583/* Can we start vq? */
 584/* Caller should have vq mutex and device mutex */
 585int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 586{
 587        return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 588                vq_log_access_ok(vq->dev, vq, vq->log_base);
 589}
 590EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 591
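/* Replace the device memory table: copy the header and regions in from
 * userspace, validate them against every active ring, then publish the new
 * table under RCU and free the old one after a grace period. */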
 592static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 593{
 594        struct vhost_memory mem, *newmem, *oldmem;
 595        unsigned long size = offsetof(struct vhost_memory, regions);
 596
 597        if (copy_from_user(&mem, m, size))
 598                return -EFAULT;
 599        if (mem.padding)
 600                return -EOPNOTSUPP;
 601        if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 602                return -E2BIG;
 603        newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
 604        if (!newmem)
 605                return -ENOMEM;
 606
 607        memcpy(newmem, &mem, size);
 608        if (copy_from_user(newmem->regions, m->regions,
 609                           mem.nregions * sizeof *m->regions)) {
 610                kfree(newmem);
 611                return -EFAULT;
 612        }
 613
 614        if (!memory_access_ok(d, newmem,
 615                              vhost_has_feature(d, VHOST_F_LOG_ALL))) {
 616                kfree(newmem);
 617                return -EFAULT;
 618        }
 619        oldmem = rcu_dereference_protected(d->memory,
 620                                           lockdep_is_held(&d->mutex));
 621        rcu_assign_pointer(d->memory, newmem);
 622        synchronize_rcu();
 623        kfree(oldmem);
 624        return 0;
 625}
 626
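/* Per-virtqueue ioctls: resize the ring, set/get the base index, set the ring
 * addresses, and wire up the kick, call and error eventfds. */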
 627long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 628{
 629        struct file *eventfp, *filep = NULL;
 630        bool pollstart = false, pollstop = false;
 631        struct eventfd_ctx *ctx = NULL;
 632        u32 __user *idxp = argp;
 633        struct vhost_virtqueue *vq;
 634        struct vhost_vring_state s;
 635        struct vhost_vring_file f;
 636        struct vhost_vring_addr a;
 637        u32 idx;
 638        long r;
 639
 640        r = get_user(idx, idxp);
 641        if (r < 0)
 642                return r;
 643        if (idx >= d->nvqs)
 644                return -ENOBUFS;
 645
 646        vq = d->vqs[idx];
 647
 648        mutex_lock(&vq->mutex);
 649
 650        switch (ioctl) {
 651        case VHOST_SET_VRING_NUM:
 652                /* Resizing ring with an active backend?
 653                 * You don't want to do that. */
 654                if (vq->private_data) {
 655                        r = -EBUSY;
 656                        break;
 657                }
 658                if (copy_from_user(&s, argp, sizeof s)) {
 659                        r = -EFAULT;
 660                        break;
 661                }
 662                if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 663                        r = -EINVAL;
 664                        break;
 665                }
 666                vq->num = s.num;
 667                break;
 668        case VHOST_SET_VRING_BASE:
 669                /* Moving base with an active backend?
 670                 * You don't want to do that. */
 671                if (vq->private_data) {
 672                        r = -EBUSY;
 673                        break;
 674                }
 675                if (copy_from_user(&s, argp, sizeof s)) {
 676                        r = -EFAULT;
 677                        break;
 678                }
 679                if (s.num > 0xffff) {
 680                        r = -EINVAL;
 681                        break;
 682                }
 683                vq->last_avail_idx = s.num;
 684                /* Forget the cached index value. */
 685                vq->avail_idx = vq->last_avail_idx;
 686                break;
 687        case VHOST_GET_VRING_BASE:
 688                s.index = idx;
 689                s.num = vq->last_avail_idx;
 690                if (copy_to_user(argp, &s, sizeof s))
 691                        r = -EFAULT;
 692                break;
 693        case VHOST_SET_VRING_ADDR:
 694                if (copy_from_user(&a, argp, sizeof a)) {
 695                        r = -EFAULT;
 696                        break;
 697                }
 698                if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 699                        r = -EOPNOTSUPP;
 700                        break;
 701                }
  702                /* For 32-bit kernels, verify that the top 32 bits of the
  703                   user data are set to zero. */
 704                if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 705                    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 706                    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 707                        r = -EFAULT;
 708                        break;
 709                }
 710                if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
 711                    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
 712                    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
 713                        r = -EINVAL;
 714                        break;
 715                }
 716
 717                /* We only verify access here if backend is configured.
 718                 * If it is not, we don't as size might not have been setup.
 719                 * We will verify when backend is configured. */
 720                if (vq->private_data) {
 721                        if (!vq_access_ok(d, vq->num,
 722                                (void __user *)(unsigned long)a.desc_user_addr,
 723                                (void __user *)(unsigned long)a.avail_user_addr,
 724                                (void __user *)(unsigned long)a.used_user_addr)) {
 725                                r = -EINVAL;
 726                                break;
 727                        }
 728
 729                        /* Also validate log access for used ring if enabled. */
 730                        if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 731                            !log_access_ok(vq->log_base, a.log_guest_addr,
 732                                           sizeof *vq->used +
 733                                           vq->num * sizeof *vq->used->ring)) {
 734                                r = -EINVAL;
 735                                break;
 736                        }
 737                }
 738
 739                vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 740                vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 741                vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 742                vq->log_addr = a.log_guest_addr;
 743                vq->used = (void __user *)(unsigned long)a.used_user_addr;
 744                break;
 745        case VHOST_SET_VRING_KICK:
 746                if (copy_from_user(&f, argp, sizeof f)) {
 747                        r = -EFAULT;
 748                        break;
 749                }
 750                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 751                if (IS_ERR(eventfp)) {
 752                        r = PTR_ERR(eventfp);
 753                        break;
 754                }
 755                if (eventfp != vq->kick) {
 756                        pollstop = (filep = vq->kick) != NULL;
 757                        pollstart = (vq->kick = eventfp) != NULL;
 758                } else
 759                        filep = eventfp;
 760                break;
 761        case VHOST_SET_VRING_CALL:
 762                if (copy_from_user(&f, argp, sizeof f)) {
 763                        r = -EFAULT;
 764                        break;
 765                }
 766                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 767                if (IS_ERR(eventfp)) {
 768                        r = PTR_ERR(eventfp);
 769                        break;
 770                }
 771                if (eventfp != vq->call) {
 772                        filep = vq->call;
 773                        ctx = vq->call_ctx;
 774                        vq->call = eventfp;
 775                        vq->call_ctx = eventfp ?
 776                                eventfd_ctx_fileget(eventfp) : NULL;
 777                } else
 778                        filep = eventfp;
 779                break;
 780        case VHOST_SET_VRING_ERR:
 781                if (copy_from_user(&f, argp, sizeof f)) {
 782                        r = -EFAULT;
 783                        break;
 784                }
 785                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 786                if (IS_ERR(eventfp)) {
 787                        r = PTR_ERR(eventfp);
 788                        break;
 789                }
 790                if (eventfp != vq->error) {
 791                        filep = vq->error;
 792                        vq->error = eventfp;
 793                        ctx = vq->error_ctx;
 794                        vq->error_ctx = eventfp ?
 795                                eventfd_ctx_fileget(eventfp) : NULL;
 796                } else
 797                        filep = eventfp;
 798                break;
 799        default:
 800                r = -ENOIOCTLCMD;
 801        }
 802
 803        if (pollstop && vq->handle_kick)
 804                vhost_poll_stop(&vq->poll);
 805
 806        if (ctx)
 807                eventfd_ctx_put(ctx);
 808        if (filep)
 809                fput(filep);
 810
 811        if (pollstart && vq->handle_kick)
 812                r = vhost_poll_start(&vq->poll, vq->kick);
 813
 814        mutex_unlock(&vq->mutex);
 815
 816        if (pollstop && vq->handle_kick)
 817                vhost_poll_flush(&vq->poll);
 818        return r;
 819}
 820EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 821
 822/* Caller must have device mutex */
 823long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 824{
 825        struct file *eventfp, *filep = NULL;
 826        struct eventfd_ctx *ctx = NULL;
 827        u64 p;
 828        long r;
 829        int i, fd;
 830
 831        /* If you are not the owner, you can become one */
 832        if (ioctl == VHOST_SET_OWNER) {
 833                r = vhost_dev_set_owner(d);
 834                goto done;
 835        }
 836
 837        /* You must be the owner to do anything else */
 838        r = vhost_dev_check_owner(d);
 839        if (r)
 840                goto done;
 841
 842        switch (ioctl) {
 843        case VHOST_SET_MEM_TABLE:
 844                r = vhost_set_memory(d, argp);
 845                break;
 846        case VHOST_SET_LOG_BASE:
 847                if (copy_from_user(&p, argp, sizeof p)) {
 848                        r = -EFAULT;
 849                        break;
 850                }
 851                if ((u64)(unsigned long)p != p) {
 852                        r = -EFAULT;
 853                        break;
 854                }
 855                for (i = 0; i < d->nvqs; ++i) {
 856                        struct vhost_virtqueue *vq;
 857                        void __user *base = (void __user *)(unsigned long)p;
 858                        vq = d->vqs[i];
 859                        mutex_lock(&vq->mutex);
 860                        /* If ring is inactive, will check when it's enabled. */
 861                        if (vq->private_data && !vq_log_access_ok(d, vq, base))
 862                                r = -EFAULT;
 863                        else
 864                                vq->log_base = base;
 865                        mutex_unlock(&vq->mutex);
 866                }
 867                break;
 868        case VHOST_SET_LOG_FD:
 869                r = get_user(fd, (int __user *)argp);
 870                if (r < 0)
 871                        break;
 872                eventfp = fd == -1 ? NULL : eventfd_fget(fd);
 873                if (IS_ERR(eventfp)) {
 874                        r = PTR_ERR(eventfp);
 875                        break;
 876                }
 877                if (eventfp != d->log_file) {
 878                        filep = d->log_file;
 879                        ctx = d->log_ctx;
 880                        d->log_ctx = eventfp ?
 881                                eventfd_ctx_fileget(eventfp) : NULL;
 882                } else
 883                        filep = eventfp;
 884                for (i = 0; i < d->nvqs; ++i) {
 885                        mutex_lock(&d->vqs[i]->mutex);
 886                        d->vqs[i]->log_ctx = d->log_ctx;
 887                        mutex_unlock(&d->vqs[i]->mutex);
 888                }
 889                if (ctx)
 890                        eventfd_ctx_put(ctx);
 891                if (filep)
 892                        fput(filep);
 893                break;
 894        default:
 895                r = -ENOIOCTLCMD;
 896                break;
 897        }
 898done:
 899        return r;
 900}
 901EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 902
 903static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 904                                                     __u64 addr, __u32 len)
 905{
 906        struct vhost_memory_region *reg;
 907        int i;
 908
 909        /* linear search is not brilliant, but we really have on the order of 6
 910         * regions in practice */
 911        for (i = 0; i < mem->nregions; ++i) {
 912                reg = mem->regions + i;
 913                if (reg->guest_phys_addr <= addr &&
 914                    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
 915                        return reg;
 916        }
 917        return NULL;
 918}
 919
 920/* TODO: This is really inefficient.  We need something like get_user()
 921 * (instruction directly accesses the data, with an exception table entry
 922 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 923 */
 924static int set_bit_to_user(int nr, void __user *addr)
 925{
 926        unsigned long log = (unsigned long)addr;
 927        struct page *page;
 928        void *base;
 929        int bit = nr + (log % PAGE_SIZE) * 8;
 930        int r;
 931
 932        r = get_user_pages_fast(log, 1, 1, &page);
 933        if (r < 0)
 934                return r;
 935        BUG_ON(r != 1);
 936        base = kmap_atomic(page);
 937        set_bit(bit, base);
 938        kunmap_atomic(base);
 939        set_page_dirty_lock(page);
 940        put_page(page);
 941        return 0;
 942}
 943
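/* Set one dirty bit per VHOST_PAGE_SIZE page touched by a write of
 * write_length bytes at write_address, in the bitmap at log_base. */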
 944static int log_write(void __user *log_base,
 945                     u64 write_address, u64 write_length)
 946{
 947        u64 write_page = write_address / VHOST_PAGE_SIZE;
 948        int r;
 949
 950        if (!write_length)
 951                return 0;
 952        write_length += write_address % VHOST_PAGE_SIZE;
 953        for (;;) {
 954                u64 base = (u64)(unsigned long)log_base;
 955                u64 log = base + write_page / 8;
 956                int bit = write_page % 8;
 957                if ((u64)(unsigned long)log != log)
 958                        return -EFAULT;
 959                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
 960                if (r < 0)
 961                        return r;
 962                if (write_length <= VHOST_PAGE_SIZE)
 963                        break;
 964                write_length -= VHOST_PAGE_SIZE;
 965                write_page += 1;
 966        }
 967        return r;
 968}
 969
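/* Mark the guest pages described by the log[] entries dirty, covering at most
 * len bytes in total, then signal the log eventfd if one is set. */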
 970int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 971                    unsigned int log_num, u64 len)
 972{
 973        int i, r;
 974
 975        /* Make sure data written is seen before log. */
 976        smp_wmb();
 977        for (i = 0; i < log_num; ++i) {
 978                u64 l = min(log[i].len, len);
 979                r = log_write(vq->log_base, log[i].addr, l);
 980                if (r < 0)
 981                        return r;
 982                len -= l;
 983                if (!len) {
 984                        if (vq->log_ctx)
 985                                eventfd_signal(vq->log_ctx, 1);
 986                        return 0;
 987                }
 988        }
 989        /* Length written exceeds what we have stored. This is a bug. */
 990        BUG();
 991        return 0;
 992}
 993EXPORT_SYMBOL_GPL(vhost_log_write);
 994
 995static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 996{
 997        void __user *used;
 998        if (__put_user(vq->used_flags, &vq->used->flags) < 0)
 999                return -EFAULT;
1000        if (unlikely(vq->log_used)) {
1001                /* Make sure the flag is seen before log. */
1002                smp_wmb();
1003                /* Log used flag write. */
1004                used = &vq->used->flags;
1005                log_write(vq->log_base, vq->log_addr +
1006                          (used - (void __user *)vq->used),
1007                          sizeof vq->used->flags);
1008                if (vq->log_ctx)
1009                        eventfd_signal(vq->log_ctx, 1);
1010        }
1011        return 0;
1012}
1013
1014static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1015{
1016        if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
1017                return -EFAULT;
1018        if (unlikely(vq->log_used)) {
1019                void __user *used;
1020                /* Make sure the event is seen before log. */
1021                smp_wmb();
1022                /* Log avail event write */
1023                used = vhost_avail_event(vq);
1024                log_write(vq->log_base, vq->log_addr +
1025                          (used - (void __user *)vq->used),
1026                          sizeof *vhost_avail_event(vq));
1027                if (vq->log_ctx)
1028                        eventfd_signal(vq->log_ctx, 1);
1029        }
1030        return 0;
1031}
1032
1033int vhost_init_used(struct vhost_virtqueue *vq)
1034{
1035        int r;
1036        if (!vq->private_data)
1037                return 0;
1038
1039        r = vhost_update_used_flags(vq);
1040        if (r)
1041                return r;
1042        vq->signalled_used_valid = false;
1043        return get_user(vq->last_used_idx, &vq->used->idx);
1044}
1045EXPORT_SYMBOL_GPL(vhost_init_used);
1046
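/* Translate a guest-physical range into host userspace iovecs using the
 * current memory table; a range spanning several regions is split across
 * entries. Returns the number of iovecs used or a negative errno. */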
1047static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
1048                          struct iovec iov[], int iov_size)
1049{
1050        const struct vhost_memory_region *reg;
1051        struct vhost_memory *mem;
1052        struct iovec *_iov;
1053        u64 s = 0;
1054        int ret = 0;
1055
1056        rcu_read_lock();
1057
1058        mem = rcu_dereference(dev->memory);
1059        while ((u64)len > s) {
1060                u64 size;
1061                if (unlikely(ret >= iov_size)) {
1062                        ret = -ENOBUFS;
1063                        break;
1064                }
1065                reg = find_region(mem, addr, len);
1066                if (unlikely(!reg)) {
1067                        ret = -EFAULT;
1068                        break;
1069                }
1070                _iov = iov + ret;
1071                size = reg->memory_size - addr + reg->guest_phys_addr;
1072                _iov->iov_len = min((u64)len - s, size);
1073                _iov->iov_base = (void __user *)(unsigned long)
1074                        (reg->userspace_addr + addr - reg->guest_phys_addr);
1075                s += size;
1076                addr += size;
1077                ++ret;
1078        }
1079
1080        rcu_read_unlock();
1081        return ret;
1082}
1083
1084/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1085 * function returns the next descriptor in the chain,
1086 * or -1U if we're at the end. */
1087static unsigned next_desc(struct vring_desc *desc)
1088{
1089        unsigned int next;
1090
1091        /* If this descriptor says it doesn't chain, we're done. */
1092        if (!(desc->flags & VRING_DESC_F_NEXT))
1093                return -1U;
1094
 1095        /* Check they're not leading us off the end of the descriptor table. */
1096        next = desc->next;
1097        /* Make sure compiler knows to grab that: we don't want it changing! */
1098        /* We will use the result as an index in an array, so most
1099         * architectures only need a compiler barrier here. */
1100        read_barrier_depends();
1101
1102        return next;
1103}
1104
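/* Walk an indirect descriptor table: map the table itself into vq->indirect,
 * then translate each embedded descriptor into the caller's iov, enforcing the
 * usual out-before-in ordering and loop limits. */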
1105static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1106                        struct iovec iov[], unsigned int iov_size,
1107                        unsigned int *out_num, unsigned int *in_num,
1108                        struct vhost_log *log, unsigned int *log_num,
1109                        struct vring_desc *indirect)
1110{
1111        struct vring_desc desc;
1112        unsigned int i = 0, count, found = 0;
1113        int ret;
1114
1115        /* Sanity check */
1116        if (unlikely(indirect->len % sizeof desc)) {
1117                vq_err(vq, "Invalid length in indirect descriptor: "
1118                       "len 0x%llx not multiple of 0x%zx\n",
1119                       (unsigned long long)indirect->len,
1120                       sizeof desc);
1121                return -EINVAL;
1122        }
1123
1124        ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
1125                             UIO_MAXIOV);
1126        if (unlikely(ret < 0)) {
1127                vq_err(vq, "Translation failure %d in indirect.\n", ret);
1128                return ret;
1129        }
1130
1131        /* We will use the result as an address to read from, so most
1132         * architectures only need a compiler barrier here. */
1133        read_barrier_depends();
1134
1135        count = indirect->len / sizeof desc;
 1136        /* Buffers are chained via a 16-bit next field, so
1137         * we can have at most 2^16 of these. */
1138        if (unlikely(count > USHRT_MAX + 1)) {
1139                vq_err(vq, "Indirect buffer length too big: %d\n",
1140                       indirect->len);
1141                return -E2BIG;
1142        }
1143
1144        do {
1145                unsigned iov_count = *in_num + *out_num;
1146                if (unlikely(++found > count)) {
1147                        vq_err(vq, "Loop detected: last one at %u "
1148                               "indirect size %u\n",
1149                               i, count);
1150                        return -EINVAL;
1151                }
1152                if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1153                                              vq->indirect, sizeof desc))) {
1154                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1155                               i, (size_t)indirect->addr + i * sizeof desc);
1156                        return -EINVAL;
1157                }
1158                if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
1159                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1160                               i, (size_t)indirect->addr + i * sizeof desc);
1161                        return -EINVAL;
1162                }
1163
1164                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1165                                     iov_size - iov_count);
1166                if (unlikely(ret < 0)) {
1167                        vq_err(vq, "Translation failure %d indirect idx %d\n",
1168                               ret, i);
1169                        return ret;
1170                }
1171                /* If this is an input descriptor, increment that count. */
1172                if (desc.flags & VRING_DESC_F_WRITE) {
1173                        *in_num += ret;
1174                        if (unlikely(log)) {
1175                                log[*log_num].addr = desc.addr;
1176                                log[*log_num].len = desc.len;
1177                                ++*log_num;
1178                        }
1179                } else {
1180                        /* If it's an output descriptor, they're all supposed
1181                         * to come before any input descriptors. */
1182                        if (unlikely(*in_num)) {
1183                                vq_err(vq, "Indirect descriptor "
1184                                       "has out after in: idx %d\n", i);
1185                                return -EINVAL;
1186                        }
1187                        *out_num += ret;
1188                }
1189        } while ((i = next_desc(&desc)) != -1);
1190        return 0;
1191}
1192
 1193/* This looks in the virtqueue for the first available buffer, and converts
1194 * it to an iovec for convenient access.  Since descriptors consist of some
1195 * number of output then some number of input descriptors, it's actually two
1196 * iovecs, but we pack them into one and note how many of each there were.
1197 *
1198 * This function returns the descriptor number found, or vq->num (which is
1199 * never a valid descriptor number) if none was found.  A negative code is
1200 * returned on error. */
1201int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1202                      struct iovec iov[], unsigned int iov_size,
1203                      unsigned int *out_num, unsigned int *in_num,
1204                      struct vhost_log *log, unsigned int *log_num)
1205{
1206        struct vring_desc desc;
1207        unsigned int i, head, found = 0;
1208        u16 last_avail_idx;
1209        int ret;
1210
1211        /* Check it isn't doing very strange things with descriptor numbers. */
1212        last_avail_idx = vq->last_avail_idx;
1213        if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
1214                vq_err(vq, "Failed to access avail idx at %p\n",
1215                       &vq->avail->idx);
1216                return -EFAULT;
1217        }
1218
1219        if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
 1220                vq_err(vq, "Guest moved avail index from %u to %u",
1221                       last_avail_idx, vq->avail_idx);
1222                return -EFAULT;
1223        }
1224
1225        /* If there's nothing new since last we looked, return invalid. */
1226        if (vq->avail_idx == last_avail_idx)
1227                return vq->num;
1228
1229        /* Only get avail ring entries after they have been exposed by guest. */
1230        smp_rmb();
1231
1232        /* Grab the next descriptor number they're advertising, and increment
1233         * the index we've seen. */
1234        if (unlikely(__get_user(head,
1235                                &vq->avail->ring[last_avail_idx % vq->num]))) {
1236                vq_err(vq, "Failed to read head: idx %d address %p\n",
1237                       last_avail_idx,
1238                       &vq->avail->ring[last_avail_idx % vq->num]);
1239                return -EFAULT;
1240        }
1241
1242        /* If their number is silly, that's an error. */
1243        if (unlikely(head >= vq->num)) {
1244                vq_err(vq, "Guest says index %u > %u is available",
1245                       head, vq->num);
1246                return -EINVAL;
1247        }
1248
 1249        /* When we start there are neither input nor output descriptors. */
1250        *out_num = *in_num = 0;
1251        if (unlikely(log))
1252                *log_num = 0;
1253
1254        i = head;
1255        do {
1256                unsigned iov_count = *in_num + *out_num;
1257                if (unlikely(i >= vq->num)) {
1258                        vq_err(vq, "Desc index is %u > %u, head = %u",
1259                               i, vq->num, head);
1260                        return -EINVAL;
1261                }
1262                if (unlikely(++found > vq->num)) {
1263                        vq_err(vq, "Loop detected: last one at %u "
1264                               "vq size %u head %u\n",
1265                               i, vq->num, head);
1266                        return -EINVAL;
1267                }
1268                ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1269                if (unlikely(ret)) {
1270                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1271                               i, vq->desc + i);
1272                        return -EFAULT;
1273                }
1274                if (desc.flags & VRING_DESC_F_INDIRECT) {
1275                        ret = get_indirect(dev, vq, iov, iov_size,
1276                                           out_num, in_num,
1277                                           log, log_num, &desc);
1278                        if (unlikely(ret < 0)) {
1279                                vq_err(vq, "Failure detected "
1280                                       "in indirect descriptor at idx %d\n", i);
1281                                return ret;
1282                        }
1283                        continue;
1284                }
1285
1286                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1287                                     iov_size - iov_count);
1288                if (unlikely(ret < 0)) {
1289                        vq_err(vq, "Translation failure %d descriptor idx %d\n",
1290                               ret, i);
1291                        return ret;
1292                }
1293                if (desc.flags & VRING_DESC_F_WRITE) {
1294                        /* If this is an input descriptor,
1295                         * increment that count. */
1296                        *in_num += ret;
1297                        if (unlikely(log)) {
1298                                log[*log_num].addr = desc.addr;
1299                                log[*log_num].len = desc.len;
1300                                ++*log_num;
1301                        }
1302                } else {
1303                        /* If it's an output descriptor, they're all supposed
1304                         * to come before any input descriptors. */
1305                        if (unlikely(*in_num)) {
1306                                vq_err(vq, "Descriptor has out after in: "
1307                                       "idx %d\n", i);
1308                                return -EINVAL;
1309                        }
1310                        *out_num += ret;
1311                }
1312        } while ((i = next_desc(&desc)) != -1);
1313
1314        /* On success, increment avail index. */
1315        vq->last_avail_idx++;
1316
 1317        /* Assume notifications from the guest are disabled at this point;
 1318         * if they aren't, we would need to update the avail_event index. */
1319        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1320        return head;
1321}
1322EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
1323
1324/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1325void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1326{
1327        vq->last_avail_idx -= n;
1328}
1329EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
1330
1331/* After we've used one of their buffers, we tell them about it.  We'll then
1332 * want to notify the guest, using eventfd. */
1333int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1334{
1335        struct vring_used_elem __user *used;
1336
1337        /* The virtqueue contains a ring of used buffers.  Get a pointer to the
1338         * next entry in that used ring. */
1339        used = &vq->used->ring[vq->last_used_idx % vq->num];
1340        if (__put_user(head, &used->id)) {
1341                vq_err(vq, "Failed to write used id");
1342                return -EFAULT;
1343        }
1344        if (__put_user(len, &used->len)) {
1345                vq_err(vq, "Failed to write used len");
1346                return -EFAULT;
1347        }
1348        /* Make sure buffer is written before we update index. */
1349        smp_wmb();
1350        if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1351                vq_err(vq, "Failed to increment used idx");
1352                return -EFAULT;
1353        }
1354        if (unlikely(vq->log_used)) {
1355                /* Make sure data is seen before log. */
1356                smp_wmb();
1357                /* Log used ring entry write. */
1358                log_write(vq->log_base,
1359                          vq->log_addr +
1360                           ((void __user *)used - (void __user *)vq->used),
1361                          sizeof *used);
1362                /* Log used index update. */
1363                log_write(vq->log_base,
1364                          vq->log_addr + offsetof(struct vring_used, idx),
1365                          sizeof vq->used->idx);
1366                if (vq->log_ctx)
1367                        eventfd_signal(vq->log_ctx, 1);
1368        }
1369        vq->last_used_idx++;
1370        /* If the driver never bothers to signal in a very long while,
1371         * used index might wrap around. If that happens, invalidate
1372         * signalled_used index we stored. TODO: make sure driver
1373         * signals at least once in 2^16 and remove this. */
1374        if (unlikely(vq->last_used_idx == vq->signalled_used))
1375                vq->signalled_used_valid = false;
1376        return 0;
1377}
1378EXPORT_SYMBOL_GPL(vhost_add_used);
1379
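/* Write a batch of used elements into the ring starting at last_used_idx; the
 * caller publishes used->idx afterwards, so a batch must not wrap the ring. */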
1380static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1381                            struct vring_used_elem *heads,
1382                            unsigned count)
1383{
1384        struct vring_used_elem __user *used;
1385        u16 old, new;
1386        int start;
1387
1388        start = vq->last_used_idx % vq->num;
1389        used = vq->used->ring + start;
1390        if (__copy_to_user(used, heads, count * sizeof *used)) {
1391                vq_err(vq, "Failed to write used");
1392                return -EFAULT;
1393        }
1394        if (unlikely(vq->log_used)) {
1395                /* Make sure data is seen before log. */
1396                smp_wmb();
1397                /* Log used ring entry write. */
1398                log_write(vq->log_base,
1399                          vq->log_addr +
1400                           ((void __user *)used - (void __user *)vq->used),
1401                          count * sizeof *used);
1402        }
1403        old = vq->last_used_idx;
1404        new = (vq->last_used_idx += count);
1405        /* If the driver never bothers to signal in a very long while,
1406         * used index might wrap around. If that happens, invalidate
1407         * signalled_used index we stored. TODO: make sure driver
1408         * signals at least once in 2^16 and remove this. */
1409        if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1410                vq->signalled_used_valid = false;
1411        return 0;
1412}
1413
1414/* After we've used one of their buffers, we tell them about it.  We'll then
1415 * want to notify the guest, using eventfd. */
1416int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1417                     unsigned count)
1418{
1419        int start, n, r;
1420
1421        start = vq->last_used_idx % vq->num;
1422        n = vq->num - start;
1423        if (n < count) {
1424                r = __vhost_add_used_n(vq, heads, n);
1425                if (r < 0)
1426                        return r;
1427                heads += n;
1428                count -= n;
1429        }
1430        r = __vhost_add_used_n(vq, heads, count);
1431
1432        /* Make sure buffer is written before we update index. */
1433        smp_wmb();
1434        if (put_user(vq->last_used_idx, &vq->used->idx)) {
1435                vq_err(vq, "Failed to increment used idx");
1436                return -EFAULT;
1437        }
1438        if (unlikely(vq->log_used)) {
1439                /* Log used index update. */
1440                log_write(vq->log_base,
1441                          vq->log_addr + offsetof(struct vring_used, idx),
1442                          sizeof vq->used->idx);
1443                if (vq->log_ctx)
1444                        eventfd_signal(vq->log_ctx, 1);
1445        }
1446        return r;
1447}
1448EXPORT_SYMBOL_GPL(vhost_add_used_n);
1449
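/* Decide whether the guest wants an interrupt for what we just used: either
 * honour VRING_AVAIL_F_NO_INTERRUPT, or, with EVENT_IDX, compare used_event
 * against the used index range published since we last signalled. */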
1450static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1451{
1452        __u16 old, new, event;
1453        bool v;
1454        /* Flush out used index updates. This is paired
1455         * with the barrier that the Guest executes when enabling
1456         * interrupts. */
1457        smp_mb();
1458
1459        if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1460            unlikely(vq->avail_idx == vq->last_avail_idx))
1461                return true;
1462
1463        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1464                __u16 flags;
1465                if (__get_user(flags, &vq->avail->flags)) {
1466                        vq_err(vq, "Failed to get flags");
1467                        return true;
1468                }
1469                return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1470        }
1471        old = vq->signalled_used;
1472        v = vq->signalled_used_valid;
1473        new = vq->signalled_used = vq->last_used_idx;
1474        vq->signalled_used_valid = true;
1475
1476        if (unlikely(!v))
1477                return true;
1478
1479        if (get_user(event, vhost_used_event(vq))) {
1480                vq_err(vq, "Failed to get used event idx");
1481                return true;
1482        }
1483        return vring_need_event(event, new, old);
1484}
1485
1486/* This actually signals the guest, using eventfd. */
1487void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1488{
 1489        /* Signal the Guest to tell them we used something up. */
1490        if (vq->call_ctx && vhost_notify(dev, vq))
1491                eventfd_signal(vq->call_ctx, 1);
1492}
1493EXPORT_SYMBOL_GPL(vhost_signal);
1494
1495/* And here's the combo meal deal.  Supersize me! */
1496void vhost_add_used_and_signal(struct vhost_dev *dev,
1497                               struct vhost_virtqueue *vq,
1498                               unsigned int head, int len)
1499{
1500        vhost_add_used(vq, head, len);
1501        vhost_signal(dev, vq);
1502}
1503EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
1504
1505/* multi-buffer version of vhost_add_used_and_signal */
1506void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1507                                 struct vhost_virtqueue *vq,
1508                                 struct vring_used_elem *heads, unsigned count)
1509{
1510        vhost_add_used_n(vq, heads, count);
1511        vhost_signal(dev, vq);
1512}
1513EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
1514
1515/* OK, now we need to know about added descriptors. */
1516bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1517{
1518        u16 avail_idx;
1519        int r;
1520
1521        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1522                return false;
1523        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1524        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1525                r = vhost_update_used_flags(vq);
1526                if (r) {
1527                        vq_err(vq, "Failed to enable notification at %p: %d\n",
1528                               &vq->used->flags, r);
1529                        return false;
1530                }
1531        } else {
1532                r = vhost_update_avail_event(vq, vq->avail_idx);
1533                if (r) {
1534                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
1535                               vhost_avail_event(vq), r);
1536                        return false;
1537                }
1538        }
1539        /* They could have slipped one in as we were doing that: make
1540         * sure it's written, then check again. */
1541        smp_mb();
1542        r = __get_user(avail_idx, &vq->avail->idx);
1543        if (r) {
1544                vq_err(vq, "Failed to check avail idx at %p: %d\n",
1545                       &vq->avail->idx, r);
1546                return false;
1547        }
1548
1549        return avail_idx != vq->avail_idx;
1550}
1551EXPORT_SYMBOL_GPL(vhost_enable_notify);
1552
1553/* We don't need to be notified again. */
1554void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1555{
1556        int r;
1557
1558        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1559                return;
1560        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1561        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1562                r = vhost_update_used_flags(vq);
1563                if (r)
 1564                        vq_err(vq, "Failed to disable notification at %p: %d\n",
1565                               &vq->used->flags, r);
1566        }
1567}
1568EXPORT_SYMBOL_GPL(vhost_disable_notify);
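/* Rough usage sketch of the descriptor API (illustrative only; iov, out, in,
 * len and the surrounding loop belong to a hypothetical backend, not this
 * file):
 *
 *	head = vhost_get_vq_desc(dev, vq, iov, ARRAY_SIZE(iov),
 *				 &out, &in, NULL, NULL);
 *	if (head == vq->num) {                  // nothing available
 *		if (unlikely(vhost_enable_notify(dev, vq)))
 *			continue;               // more arrived, re-check
 *		break;                          // wait for the next kick
 *	}
 *	... read or fill the iovecs ...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 */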
1569
1570static int __init vhost_init(void)
1571{
1572        return 0;
1573}
1574
1575static void __exit vhost_exit(void)
1576{
1577}
1578
1579module_init(vhost_init);
1580module_exit(vhost_exit);
1581
1582MODULE_VERSION("0.0.1");
1583MODULE_LICENSE("GPL v2");
1584MODULE_AUTHOR("Michael S. Tsirkin");
1585MODULE_DESCRIPTION("Host kernel accelerator for virtio");
1586