linux/drivers/vhost/vhost.c
   1/* Copyright (C) 2009 Red Hat, Inc.
   2 * Copyright (C) 2006 Rusty Russell IBM Corporation
   3 *
   4 * Author: Michael S. Tsirkin <mst@redhat.com>
   5 *
   6 * Inspiration, some code, and most witty comments come from
   7 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.
  10 *
  11 * Generic code for virtio server in host kernel.
  12 */
  13
  14#include <linux/eventfd.h>
  15#include <linux/vhost.h>
  16#include <linux/socket.h> /* memcpy_fromiovec */
  17#include <linux/mm.h>
  18#include <linux/mmu_context.h>
  19#include <linux/miscdevice.h>
  20#include <linux/mutex.h>
  21#include <linux/rcupdate.h>
  22#include <linux/poll.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/slab.h>
  26#include <linux/kthread.h>
  27#include <linux/cgroup.h>
  28
  29#include "vhost.h"
  30
  31enum {
  32        VHOST_MEMORY_MAX_NREGIONS = 64,
  33        VHOST_MEMORY_F_LOG = 0x1,
  34};
  35
  36#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
  37#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])
  38
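/* About the two macros above: with VIRTIO_RING_F_EVENT_IDX negotiated, each
 * ring carries one extra __u16 slot past its last entry.  The guest publishes
 * a "used event" index after the avail ring (read via vhost_used_event()),
 * and the host publishes an "avail event" index after the used ring (written
 * via vhost_avail_event()).  Each side tells the other at which ring index it
 * next wants an interrupt/kick, replacing the NO_INTERRUPT/NO_NOTIFY flags. */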
  39static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
  40                            poll_table *pt)
  41{
  42        struct vhost_poll *poll;
  43
  44        poll = container_of(pt, struct vhost_poll, table);
  45        poll->wqh = wqh;
  46        add_wait_queue(wqh, &poll->wait);
  47}
  48
  49static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
  50                             void *key)
  51{
  52        struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
  53
  54        if (!((unsigned long)key & poll->mask))
  55                return 0;
  56
  57        vhost_poll_queue(poll);
  58        return 0;
  59}
  60
  61void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
  62{
  63        INIT_LIST_HEAD(&work->node);
  64        work->fn = fn;
  65        init_waitqueue_head(&work->done);
  66        work->flushing = 0;
  67        work->queue_seq = work->done_seq = 0;
  68}
  69
  70/* Init poll structure */
  71void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
  72                     unsigned long mask, struct vhost_dev *dev)
  73{
  74        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
  75        init_poll_funcptr(&poll->table, vhost_poll_func);
  76        poll->mask = mask;
  77        poll->dev = dev;
  78        poll->wqh = NULL;
  79
  80        vhost_work_init(&poll->work, fn);
  81}
  82
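/* A backend typically embeds a vhost_poll per virtqueue and points it at the
 * kick eventfd or at its socket.  A minimal usage sketch (names illustrative,
 * modeled on drivers/vhost/net.c):
 *
 *	vhost_poll_init(&vq->poll, handle_kick, POLLIN, dev);
 *	...
 *	vhost_poll_start(&vq->poll, vq->kick);
 *
 * When the polled file becomes ready, the wakeup callback queues the work and
 * handle_kick runs in the vhost worker thread. */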
   83/* Start polling a file. We add ourselves to the file's wait queue. The caller
   84 * must keep a reference to the file until after vhost_poll_stop is called. */
  85int vhost_poll_start(struct vhost_poll *poll, struct file *file)
  86{
  87        unsigned long mask;
  88        int ret = 0;
  89
  90        if (poll->wqh)
  91                return 0;
  92
  93        mask = file->f_op->poll(file, &poll->table);
  94        if (mask)
  95                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
  96        if (mask & POLLERR) {
  97                if (poll->wqh)
  98                        remove_wait_queue(poll->wqh, &poll->wait);
  99                ret = -EINVAL;
 100        }
 101
 102        return ret;
 103}
 104
 105/* Stop polling a file. After this function returns, it becomes safe to drop the
 106 * file reference. You must also flush afterwards. */
 107void vhost_poll_stop(struct vhost_poll *poll)
 108{
 109        if (poll->wqh) {
 110                remove_wait_queue(poll->wqh, &poll->wait);
 111                poll->wqh = NULL;
 112        }
 113}
 114
 115static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 116                                unsigned seq)
 117{
 118        int left;
 119
 120        spin_lock_irq(&dev->work_lock);
 121        left = seq - work->done_seq;
 122        spin_unlock_irq(&dev->work_lock);
 123        return left <= 0;
 124}
 125
 126static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 127{
 128        unsigned seq;
 129        int flushing;
 130
 131        spin_lock_irq(&dev->work_lock);
 132        seq = work->queue_seq;
 133        work->flushing++;
 134        spin_unlock_irq(&dev->work_lock);
 135        wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 136        spin_lock_irq(&dev->work_lock);
 137        flushing = --work->flushing;
 138        spin_unlock_irq(&dev->work_lock);
 139        BUG_ON(flushing < 0);
 140}
 141
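/* The flush above is a simple sequence-number handshake: vhost_work_queue()
 * bumps work->queue_seq when it puts the work on the list, and the worker
 * copies that value into work->done_seq once the callback has run.
 * vhost_work_flush() snapshots queue_seq and sleeps until done_seq has caught
 * up, so on return every invocation queued before the flush has finished. */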
 142/* Flush any work that has been scheduled. When calling this, don't hold any
 143 * locks that are also used by the callback. */
 144void vhost_poll_flush(struct vhost_poll *poll)
 145{
 146        vhost_work_flush(poll->dev, &poll->work);
 147}
 148
 149void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 150{
 151        unsigned long flags;
 152
 153        spin_lock_irqsave(&dev->work_lock, flags);
 154        if (list_empty(&work->node)) {
 155                list_add_tail(&work->node, &dev->work_list);
 156                work->queue_seq++;
 157                wake_up_process(dev->worker);
 158        }
 159        spin_unlock_irqrestore(&dev->work_lock, flags);
 160}
 161
 162void vhost_poll_queue(struct vhost_poll *poll)
 163{
 164        vhost_work_queue(poll->dev, &poll->work);
 165}
 166
 167static void vhost_vq_reset(struct vhost_dev *dev,
 168                           struct vhost_virtqueue *vq)
 169{
 170        vq->num = 1;
 171        vq->desc = NULL;
 172        vq->avail = NULL;
 173        vq->used = NULL;
 174        vq->last_avail_idx = 0;
 175        vq->avail_idx = 0;
 176        vq->last_used_idx = 0;
 177        vq->signalled_used = 0;
 178        vq->signalled_used_valid = false;
 179        vq->used_flags = 0;
 180        vq->log_used = false;
 181        vq->log_addr = -1ull;
 182        vq->private_data = NULL;
 183        vq->log_base = NULL;
 184        vq->error_ctx = NULL;
 185        vq->error = NULL;
 186        vq->kick = NULL;
 187        vq->call_ctx = NULL;
 188        vq->call = NULL;
 189        vq->log_ctx = NULL;
 190}
 191
 192static int vhost_worker(void *data)
 193{
 194        struct vhost_dev *dev = data;
 195        struct vhost_work *work = NULL;
 196        unsigned uninitialized_var(seq);
 197        mm_segment_t oldfs = get_fs();
 198
 199        set_fs(USER_DS);
 200        use_mm(dev->mm);
 201
 202        for (;;) {
 203                /* mb paired w/ kthread_stop */
 204                set_current_state(TASK_INTERRUPTIBLE);
 205
 206                spin_lock_irq(&dev->work_lock);
 207                if (work) {
 208                        work->done_seq = seq;
 209                        if (work->flushing)
 210                                wake_up_all(&work->done);
 211                }
 212
 213                if (kthread_should_stop()) {
 214                        spin_unlock_irq(&dev->work_lock);
 215                        __set_current_state(TASK_RUNNING);
 216                        break;
 217                }
 218                if (!list_empty(&dev->work_list)) {
 219                        work = list_first_entry(&dev->work_list,
 220                                                struct vhost_work, node);
 221                        list_del_init(&work->node);
 222                        seq = work->queue_seq;
 223                } else
 224                        work = NULL;
 225                spin_unlock_irq(&dev->work_lock);
 226
 227                if (work) {
 228                        __set_current_state(TASK_RUNNING);
 229                        work->fn(work);
 230                        if (need_resched())
 231                                schedule();
 232                } else
 233                        schedule();
 234
 235        }
 236        unuse_mm(dev->mm);
 237        set_fs(oldfs);
 238        return 0;
 239}
 240
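/* The worker runs with the owner's mm (use_mm) so that copy_to/from_user on
 * the rings resolves against the owning process's address space.  Each pass
 * publishes done_seq for the item it just ran, then either picks the next
 * work item off dev->work_list or sleeps until vhost_work_queue() wakes it. */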
 241static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
 242{
 243        kfree(vq->indirect);
 244        vq->indirect = NULL;
 245        kfree(vq->log);
 246        vq->log = NULL;
 247        kfree(vq->heads);
 248        vq->heads = NULL;
 249}
 250
 251/* Helper to allocate iovec buffers for all vqs. */
 252static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
 253{
 254        int i;
 255
 256        for (i = 0; i < dev->nvqs; ++i) {
 257                dev->vqs[i]->indirect = kmalloc(sizeof *dev->vqs[i]->indirect *
 258                                               UIO_MAXIOV, GFP_KERNEL);
 259                dev->vqs[i]->log = kmalloc(sizeof *dev->vqs[i]->log * UIO_MAXIOV,
 260                                          GFP_KERNEL);
 261                dev->vqs[i]->heads = kmalloc(sizeof *dev->vqs[i]->heads *
 262                                            UIO_MAXIOV, GFP_KERNEL);
 263                if (!dev->vqs[i]->indirect || !dev->vqs[i]->log ||
 264                        !dev->vqs[i]->heads)
 265                        goto err_nomem;
 266        }
 267        return 0;
 268
 269err_nomem:
 270        for (; i >= 0; --i)
 271                vhost_vq_free_iovecs(dev->vqs[i]);
 272        return -ENOMEM;
 273}
 274
 275static void vhost_dev_free_iovecs(struct vhost_dev *dev)
 276{
 277        int i;
 278
 279        for (i = 0; i < dev->nvqs; ++i)
 280                vhost_vq_free_iovecs(dev->vqs[i]);
 281}
 282
 283long vhost_dev_init(struct vhost_dev *dev,
 284                    struct vhost_virtqueue **vqs, int nvqs)
 285{
 286        int i;
 287
 288        dev->vqs = vqs;
 289        dev->nvqs = nvqs;
 290        mutex_init(&dev->mutex);
 291        dev->log_ctx = NULL;
 292        dev->log_file = NULL;
 293        dev->memory = NULL;
 294        dev->mm = NULL;
 295        spin_lock_init(&dev->work_lock);
 296        INIT_LIST_HEAD(&dev->work_list);
 297        dev->worker = NULL;
 298
 299        for (i = 0; i < dev->nvqs; ++i) {
 300                dev->vqs[i]->log = NULL;
 301                dev->vqs[i]->indirect = NULL;
 302                dev->vqs[i]->heads = NULL;
 303                dev->vqs[i]->dev = dev;
 304                mutex_init(&dev->vqs[i]->mutex);
 305                vhost_vq_reset(dev, dev->vqs[i]);
 306                if (dev->vqs[i]->handle_kick)
 307                        vhost_poll_init(&dev->vqs[i]->poll,
 308                                        dev->vqs[i]->handle_kick, POLLIN, dev);
 309        }
 310
 311        return 0;
 312}
 313
 314/* Caller should have device mutex */
 315long vhost_dev_check_owner(struct vhost_dev *dev)
 316{
 317        /* Are you the owner? If not, I don't think you mean to do that */
 318        return dev->mm == current->mm ? 0 : -EPERM;
 319}
 320
 321struct vhost_attach_cgroups_struct {
 322        struct vhost_work work;
 323        struct task_struct *owner;
 324        int ret;
 325};
 326
 327static void vhost_attach_cgroups_work(struct vhost_work *work)
 328{
 329        struct vhost_attach_cgroups_struct *s;
 330
 331        s = container_of(work, struct vhost_attach_cgroups_struct, work);
 332        s->ret = cgroup_attach_task_all(s->owner, current);
 333}
 334
 335static int vhost_attach_cgroups(struct vhost_dev *dev)
 336{
 337        struct vhost_attach_cgroups_struct attach;
 338
 339        attach.owner = current;
 340        vhost_work_init(&attach.work, vhost_attach_cgroups_work);
 341        vhost_work_queue(dev, &attach.work);
 342        vhost_work_flush(dev, &attach.work);
 343        return attach.ret;
 344}
 345
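/* The attach is done from a work item so that it is the worker thread itself
 * that calls cgroup_attach_task_all(): the kthread is moved into all of the
 * owner's cgroups, so its resource usage is accounted to the process that
 * owns the vhost device rather than to the root cgroup. */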
 346/* Caller should have device mutex */
 347bool vhost_dev_has_owner(struct vhost_dev *dev)
 348{
 349        return dev->mm;
 350}
 351
 352/* Caller should have device mutex */
 353long vhost_dev_set_owner(struct vhost_dev *dev)
 354{
 355        struct task_struct *worker;
 356        int err;
 357
 358        /* Is there an owner already? */
 359        if (vhost_dev_has_owner(dev)) {
 360                err = -EBUSY;
 361                goto err_mm;
 362        }
 363
 364        /* No owner, become one */
 365        dev->mm = get_task_mm(current);
 366        worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
 367        if (IS_ERR(worker)) {
 368                err = PTR_ERR(worker);
 369                goto err_worker;
 370        }
 371
 372        dev->worker = worker;
 373        wake_up_process(worker);        /* avoid contributing to loadavg */
 374
 375        err = vhost_attach_cgroups(dev);
 376        if (err)
 377                goto err_cgroup;
 378
 379        err = vhost_dev_alloc_iovecs(dev);
 380        if (err)
 381                goto err_cgroup;
 382
 383        return 0;
 384err_cgroup:
 385        kthread_stop(worker);
 386        dev->worker = NULL;
 387err_worker:
 388        if (dev->mm)
 389                mmput(dev->mm);
 390        dev->mm = NULL;
 391err_mm:
 392        return err;
 393}
 394
 395struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 396{
 397        return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 398}
 399
 400/* Caller should have device mutex */
 401void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 402{
 403        vhost_dev_cleanup(dev, true);
 404
 405        /* Restore memory to default empty mapping. */
 406        memory->nregions = 0;
 407        RCU_INIT_POINTER(dev->memory, memory);
 408}
 409
 410void vhost_dev_stop(struct vhost_dev *dev)
 411{
 412        int i;
 413
 414        for (i = 0; i < dev->nvqs; ++i) {
 415                if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 416                        vhost_poll_stop(&dev->vqs[i]->poll);
 417                        vhost_poll_flush(&dev->vqs[i]->poll);
 418                }
 419        }
 420}
 421
 422/* Caller should have device mutex if and only if locked is set */
 423void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 424{
 425        int i;
 426
 427        for (i = 0; i < dev->nvqs; ++i) {
 428                if (dev->vqs[i]->error_ctx)
 429                        eventfd_ctx_put(dev->vqs[i]->error_ctx);
 430                if (dev->vqs[i]->error)
 431                        fput(dev->vqs[i]->error);
 432                if (dev->vqs[i]->kick)
 433                        fput(dev->vqs[i]->kick);
 434                if (dev->vqs[i]->call_ctx)
 435                        eventfd_ctx_put(dev->vqs[i]->call_ctx);
 436                if (dev->vqs[i]->call)
 437                        fput(dev->vqs[i]->call);
 438                vhost_vq_reset(dev, dev->vqs[i]);
 439        }
 440        vhost_dev_free_iovecs(dev);
 441        if (dev->log_ctx)
 442                eventfd_ctx_put(dev->log_ctx);
 443        dev->log_ctx = NULL;
 444        if (dev->log_file)
 445                fput(dev->log_file);
 446        dev->log_file = NULL;
 447        /* No one will access memory at this point */
 448        kfree(rcu_dereference_protected(dev->memory,
 449                                        locked ==
 450                                                lockdep_is_held(&dev->mutex)));
 451        RCU_INIT_POINTER(dev->memory, NULL);
 452        WARN_ON(!list_empty(&dev->work_list));
 453        if (dev->worker) {
 454                kthread_stop(dev->worker);
 455                dev->worker = NULL;
 456        }
 457        if (dev->mm)
 458                mmput(dev->mm);
 459        dev->mm = NULL;
 460}
 461
 462static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 463{
 464        u64 a = addr / VHOST_PAGE_SIZE / 8;
 465
 466        /* Make sure 64 bit math will not overflow. */
 467        if (a > ULONG_MAX - (unsigned long)log_base ||
 468            a + (unsigned long)log_base > ULONG_MAX)
 469                return 0;
 470
 471        return access_ok(VERIFY_WRITE, log_base + a,
 472                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 473}
 474
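/* The dirty log is a userspace bitmap at log_base with one bit per
 * VHOST_PAGE_SIZE page of guest physical memory: guest address 'addr' maps
 * to bit (addr / VHOST_PAGE_SIZE), which lives in byte
 * addr / VHOST_PAGE_SIZE / 8 of the bitmap.  log_access_ok() checks that
 * adding that byte offset to log_base cannot overflow and that the bytes
 * covering the following 'sz' bytes of guest memory are writable. */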
 475/* Caller should have vq mutex and device mutex. */
 476static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
 477                               int log_all)
 478{
 479        int i;
 480
 481        if (!mem)
 482                return 0;
 483
 484        for (i = 0; i < mem->nregions; ++i) {
 485                struct vhost_memory_region *m = mem->regions + i;
 486                unsigned long a = m->userspace_addr;
 487                if (m->memory_size > ULONG_MAX)
 488                        return 0;
 489                else if (!access_ok(VERIFY_WRITE, (void __user *)a,
 490                                    m->memory_size))
 491                        return 0;
 492                else if (log_all && !log_access_ok(log_base,
 493                                                   m->guest_phys_addr,
 494                                                   m->memory_size))
 495                        return 0;
 496        }
 497        return 1;
 498}
 499
 500/* Can we switch to this memory table? */
 501/* Caller should have device mutex but not vq mutex */
 502static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
 503                            int log_all)
 504{
 505        int i;
 506
 507        for (i = 0; i < d->nvqs; ++i) {
 508                int ok;
 509                mutex_lock(&d->vqs[i]->mutex);
 510                /* If ring is inactive, will check when it's enabled. */
 511                if (d->vqs[i]->private_data)
 512                        ok = vq_memory_access_ok(d->vqs[i]->log_base, mem,
 513                                                 log_all);
 514                else
 515                        ok = 1;
 516                mutex_unlock(&d->vqs[i]->mutex);
 517                if (!ok)
 518                        return 0;
 519        }
 520        return 1;
 521}
 522
 523static int vq_access_ok(struct vhost_dev *d, unsigned int num,
 524                        struct vring_desc __user *desc,
 525                        struct vring_avail __user *avail,
 526                        struct vring_used __user *used)
 527{
 528        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 529        return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
 530               access_ok(VERIFY_READ, avail,
 531                         sizeof *avail + num * sizeof *avail->ring + s) &&
 532               access_ok(VERIFY_WRITE, used,
 533                        sizeof *used + num * sizeof *used->ring + s);
 534}
 535
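/* Ring sizes checked above, for a queue of 'num' entries: the descriptor
 * table is num * 16 bytes, the avail ring is 4 + 2 * num bytes and the used
 * ring is 4 + 8 * num bytes, each plus 2 bytes for the event index when
 * VIRTIO_RING_F_EVENT_IDX is negotiated.  E.g. for num = 256 that is 4096,
 * 516 + 2 and 2052 + 2 bytes respectively. */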
 536/* Can we log writes? */
 537/* Caller should have device mutex but not vq mutex */
 538int vhost_log_access_ok(struct vhost_dev *dev)
 539{
 540        struct vhost_memory *mp;
 541
 542        mp = rcu_dereference_protected(dev->memory,
 543                                       lockdep_is_held(&dev->mutex));
 544        return memory_access_ok(dev, mp, 1);
 545}
 546
 547/* Verify access for write logging. */
 548/* Caller should have vq mutex and device mutex */
 549static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 550                            void __user *log_base)
 551{
 552        struct vhost_memory *mp;
 553        size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 554
 555        mp = rcu_dereference_protected(vq->dev->memory,
 556                                       lockdep_is_held(&vq->mutex));
 557        return vq_memory_access_ok(log_base, mp,
 558                            vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
 559                (!vq->log_used || log_access_ok(log_base, vq->log_addr,
 560                                        sizeof *vq->used +
 561                                        vq->num * sizeof *vq->used->ring + s));
 562}
 563
 564/* Can we start vq? */
 565/* Caller should have vq mutex and device mutex */
 566int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 567{
 568        return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 569                vq_log_access_ok(vq->dev, vq, vq->log_base);
 570}
 571
 572static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 573{
 574        struct vhost_memory mem, *newmem, *oldmem;
 575        unsigned long size = offsetof(struct vhost_memory, regions);
 576
 577        if (copy_from_user(&mem, m, size))
 578                return -EFAULT;
 579        if (mem.padding)
 580                return -EOPNOTSUPP;
 581        if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 582                return -E2BIG;
 583        newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
 584        if (!newmem)
 585                return -ENOMEM;
 586
 587        memcpy(newmem, &mem, size);
 588        if (copy_from_user(newmem->regions, m->regions,
 589                           mem.nregions * sizeof *m->regions)) {
 590                kfree(newmem);
 591                return -EFAULT;
 592        }
 593
 594        if (!memory_access_ok(d, newmem,
 595                              vhost_has_feature(d, VHOST_F_LOG_ALL))) {
 596                kfree(newmem);
 597                return -EFAULT;
 598        }
 599        oldmem = rcu_dereference_protected(d->memory,
 600                                           lockdep_is_held(&d->mutex));
 601        rcu_assign_pointer(d->memory, newmem);
 602        synchronize_rcu();
 603        kfree(oldmem);
 604        return 0;
 605}
 606
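/* The memory table is published with RCU: readers (translate_desc() in the
 * worker) use rcu_dereference() under rcu_read_lock(), so after the
 * rcu_assign_pointer() above, a synchronize_rcu() is enough to guarantee
 * nothing still references the old table before it is kfree()d. */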
 607long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 608{
 609        struct file *eventfp, *filep = NULL;
 610        bool pollstart = false, pollstop = false;
 611        struct eventfd_ctx *ctx = NULL;
 612        u32 __user *idxp = argp;
 613        struct vhost_virtqueue *vq;
 614        struct vhost_vring_state s;
 615        struct vhost_vring_file f;
 616        struct vhost_vring_addr a;
 617        u32 idx;
 618        long r;
 619
 620        r = get_user(idx, idxp);
 621        if (r < 0)
 622                return r;
 623        if (idx >= d->nvqs)
 624                return -ENOBUFS;
 625
 626        vq = d->vqs[idx];
 627
 628        mutex_lock(&vq->mutex);
 629
 630        switch (ioctl) {
 631        case VHOST_SET_VRING_NUM:
 632                /* Resizing ring with an active backend?
 633                 * You don't want to do that. */
 634                if (vq->private_data) {
 635                        r = -EBUSY;
 636                        break;
 637                }
 638                if (copy_from_user(&s, argp, sizeof s)) {
 639                        r = -EFAULT;
 640                        break;
 641                }
 642                if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
 643                        r = -EINVAL;
 644                        break;
 645                }
 646                vq->num = s.num;
 647                break;
 648        case VHOST_SET_VRING_BASE:
 649                /* Moving base with an active backend?
 650                 * You don't want to do that. */
 651                if (vq->private_data) {
 652                        r = -EBUSY;
 653                        break;
 654                }
 655                if (copy_from_user(&s, argp, sizeof s)) {
 656                        r = -EFAULT;
 657                        break;
 658                }
 659                if (s.num > 0xffff) {
 660                        r = -EINVAL;
 661                        break;
 662                }
 663                vq->last_avail_idx = s.num;
 664                /* Forget the cached index value. */
 665                vq->avail_idx = vq->last_avail_idx;
 666                break;
 667        case VHOST_GET_VRING_BASE:
 668                s.index = idx;
 669                s.num = vq->last_avail_idx;
 670                if (copy_to_user(argp, &s, sizeof s))
 671                        r = -EFAULT;
 672                break;
 673        case VHOST_SET_VRING_ADDR:
 674                if (copy_from_user(&a, argp, sizeof a)) {
 675                        r = -EFAULT;
 676                        break;
 677                }
 678                if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
 679                        r = -EOPNOTSUPP;
 680                        break;
 681                }
 682                /* For 32bit, verify that the top 32bits of the user
 683                   data are set to zero. */
 684                if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
 685                    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
 686                    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
 687                        r = -EFAULT;
 688                        break;
 689                }
 690                if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
 691                    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
 692                    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
 693                        r = -EINVAL;
 694                        break;
 695                }
 696
  697                /* We only verify access here if a backend is configured.
  698                 * If it is not, we don't verify, as the size might not have
  699                 * been set up yet. We will verify when the backend is configured. */
 700                if (vq->private_data) {
 701                        if (!vq_access_ok(d, vq->num,
 702                                (void __user *)(unsigned long)a.desc_user_addr,
 703                                (void __user *)(unsigned long)a.avail_user_addr,
 704                                (void __user *)(unsigned long)a.used_user_addr)) {
 705                                r = -EINVAL;
 706                                break;
 707                        }
 708
 709                        /* Also validate log access for used ring if enabled. */
 710                        if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
 711                            !log_access_ok(vq->log_base, a.log_guest_addr,
 712                                           sizeof *vq->used +
 713                                           vq->num * sizeof *vq->used->ring)) {
 714                                r = -EINVAL;
 715                                break;
 716                        }
 717                }
 718
 719                vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
 720                vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
 721                vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
 722                vq->log_addr = a.log_guest_addr;
 723                vq->used = (void __user *)(unsigned long)a.used_user_addr;
 724                break;
 725        case VHOST_SET_VRING_KICK:
 726                if (copy_from_user(&f, argp, sizeof f)) {
 727                        r = -EFAULT;
 728                        break;
 729                }
 730                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 731                if (IS_ERR(eventfp)) {
 732                        r = PTR_ERR(eventfp);
 733                        break;
 734                }
 735                if (eventfp != vq->kick) {
 736                        pollstop = (filep = vq->kick) != NULL;
 737                        pollstart = (vq->kick = eventfp) != NULL;
 738                } else
 739                        filep = eventfp;
 740                break;
 741        case VHOST_SET_VRING_CALL:
 742                if (copy_from_user(&f, argp, sizeof f)) {
 743                        r = -EFAULT;
 744                        break;
 745                }
 746                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 747                if (IS_ERR(eventfp)) {
 748                        r = PTR_ERR(eventfp);
 749                        break;
 750                }
 751                if (eventfp != vq->call) {
 752                        filep = vq->call;
 753                        ctx = vq->call_ctx;
 754                        vq->call = eventfp;
 755                        vq->call_ctx = eventfp ?
 756                                eventfd_ctx_fileget(eventfp) : NULL;
 757                } else
 758                        filep = eventfp;
 759                break;
 760        case VHOST_SET_VRING_ERR:
 761                if (copy_from_user(&f, argp, sizeof f)) {
 762                        r = -EFAULT;
 763                        break;
 764                }
 765                eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
 766                if (IS_ERR(eventfp)) {
 767                        r = PTR_ERR(eventfp);
 768                        break;
 769                }
 770                if (eventfp != vq->error) {
 771                        filep = vq->error;
 772                        vq->error = eventfp;
 773                        ctx = vq->error_ctx;
 774                        vq->error_ctx = eventfp ?
 775                                eventfd_ctx_fileget(eventfp) : NULL;
 776                } else
 777                        filep = eventfp;
 778                break;
 779        default:
 780                r = -ENOIOCTLCMD;
 781        }
 782
 783        if (pollstop && vq->handle_kick)
 784                vhost_poll_stop(&vq->poll);
 785
 786        if (ctx)
 787                eventfd_ctx_put(ctx);
 788        if (filep)
 789                fput(filep);
 790
 791        if (pollstart && vq->handle_kick)
 792                r = vhost_poll_start(&vq->poll, vq->kick);
 793
 794        mutex_unlock(&vq->mutex);
 795
 796        if (pollstop && vq->handle_kick)
 797                vhost_poll_flush(&vq->poll);
 798        return r;
 799}
 800
 801/* Caller must have device mutex */
 802long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 803{
 804        struct file *eventfp, *filep = NULL;
 805        struct eventfd_ctx *ctx = NULL;
 806        u64 p;
 807        long r;
 808        int i, fd;
 809
 810        /* If you are not the owner, you can become one */
 811        if (ioctl == VHOST_SET_OWNER) {
 812                r = vhost_dev_set_owner(d);
 813                goto done;
 814        }
 815
 816        /* You must be the owner to do anything else */
 817        r = vhost_dev_check_owner(d);
 818        if (r)
 819                goto done;
 820
 821        switch (ioctl) {
 822        case VHOST_SET_MEM_TABLE:
 823                r = vhost_set_memory(d, argp);
 824                break;
 825        case VHOST_SET_LOG_BASE:
 826                if (copy_from_user(&p, argp, sizeof p)) {
 827                        r = -EFAULT;
 828                        break;
 829                }
 830                if ((u64)(unsigned long)p != p) {
 831                        r = -EFAULT;
 832                        break;
 833                }
 834                for (i = 0; i < d->nvqs; ++i) {
 835                        struct vhost_virtqueue *vq;
 836                        void __user *base = (void __user *)(unsigned long)p;
 837                        vq = d->vqs[i];
 838                        mutex_lock(&vq->mutex);
 839                        /* If ring is inactive, will check when it's enabled. */
 840                        if (vq->private_data && !vq_log_access_ok(d, vq, base))
 841                                r = -EFAULT;
 842                        else
 843                                vq->log_base = base;
 844                        mutex_unlock(&vq->mutex);
 845                }
 846                break;
 847        case VHOST_SET_LOG_FD:
 848                r = get_user(fd, (int __user *)argp);
 849                if (r < 0)
 850                        break;
 851                eventfp = fd == -1 ? NULL : eventfd_fget(fd);
 852                if (IS_ERR(eventfp)) {
 853                        r = PTR_ERR(eventfp);
 854                        break;
 855                }
 856                if (eventfp != d->log_file) {
 857                        filep = d->log_file;
 858                        ctx = d->log_ctx;
 859                        d->log_ctx = eventfp ?
 860                                eventfd_ctx_fileget(eventfp) : NULL;
 861                } else
 862                        filep = eventfp;
 863                for (i = 0; i < d->nvqs; ++i) {
 864                        mutex_lock(&d->vqs[i]->mutex);
 865                        d->vqs[i]->log_ctx = d->log_ctx;
 866                        mutex_unlock(&d->vqs[i]->mutex);
 867                }
 868                if (ctx)
 869                        eventfd_ctx_put(ctx);
 870                if (filep)
 871                        fput(filep);
 872                break;
 873        default:
 874                r = -ENOIOCTLCMD;
 875                break;
 876        }
 877done:
 878        return r;
 879}
 880
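/* Rough order of the setup ioctls a userspace driver issues against a vhost
 * fd (a sketch; feature negotiation and backend setup ioctls are handled by
 * the specific backend, e.g. vhost-net):
 *
 *	ioctl(fd, VHOST_SET_OWNER, 0);
 *	ioctl(fd, VHOST_SET_MEM_TABLE, &mem);
 *	for each ring:
 *		ioctl(fd, VHOST_SET_VRING_NUM,  &state);
 *		ioctl(fd, VHOST_SET_VRING_BASE, &state);
 *		ioctl(fd, VHOST_SET_VRING_ADDR, &addr);
 *		ioctl(fd, VHOST_SET_VRING_KICK, &file);
 *		ioctl(fd, VHOST_SET_VRING_CALL, &file);
 */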
 881static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 882                                                     __u64 addr, __u32 len)
 883{
 884        struct vhost_memory_region *reg;
 885        int i;
 886
 887        /* linear search is not brilliant, but we really have on the order of 6
 888         * regions in practice */
 889        for (i = 0; i < mem->nregions; ++i) {
 890                reg = mem->regions + i;
 891                if (reg->guest_phys_addr <= addr &&
 892                    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
 893                        return reg;
 894        }
 895        return NULL;
 896}
 897
 898/* TODO: This is really inefficient.  We need something like get_user()
 899 * (instruction directly accesses the data, with an exception table entry
 900 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 901 */
 902static int set_bit_to_user(int nr, void __user *addr)
 903{
 904        unsigned long log = (unsigned long)addr;
 905        struct page *page;
 906        void *base;
 907        int bit = nr + (log % PAGE_SIZE) * 8;
 908        int r;
 909
 910        r = get_user_pages_fast(log, 1, 1, &page);
 911        if (r < 0)
 912                return r;
 913        BUG_ON(r != 1);
 914        base = kmap_atomic(page);
 915        set_bit(bit, base);
 916        kunmap_atomic(base);
 917        set_page_dirty_lock(page);
 918        put_page(page);
 919        return 0;
 920}
 921
 922static int log_write(void __user *log_base,
 923                     u64 write_address, u64 write_length)
 924{
 925        u64 write_page = write_address / VHOST_PAGE_SIZE;
 926        int r;
 927
 928        if (!write_length)
 929                return 0;
 930        write_length += write_address % VHOST_PAGE_SIZE;
 931        for (;;) {
 932                u64 base = (u64)(unsigned long)log_base;
 933                u64 log = base + write_page / 8;
 934                int bit = write_page % 8;
 935                if ((u64)(unsigned long)log != log)
 936                        return -EFAULT;
 937                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
 938                if (r < 0)
 939                        return r;
 940                if (write_length <= VHOST_PAGE_SIZE)
 941                        break;
 942                write_length -= VHOST_PAGE_SIZE;
 943                write_page += 1;
 944        }
 945        return r;
 946}
 947
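/* Example: with VHOST_PAGE_SIZE == 4096, a write of 0x2000 bytes at guest
 * address 0x5000 touches pages 5 and 6, so log_write() sets bits 5 and 6 of
 * byte 0 of the bitmap (via set_bit_to_user(), one page at a time). */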
 948int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
 949                    unsigned int log_num, u64 len)
 950{
 951        int i, r;
 952
 953        /* Make sure data written is seen before log. */
 954        smp_wmb();
 955        for (i = 0; i < log_num; ++i) {
 956                u64 l = min(log[i].len, len);
 957                r = log_write(vq->log_base, log[i].addr, l);
 958                if (r < 0)
 959                        return r;
 960                len -= l;
 961                if (!len) {
 962                        if (vq->log_ctx)
 963                                eventfd_signal(vq->log_ctx, 1);
 964                        return 0;
 965                }
 966        }
 967        /* Length written exceeds what we have stored. This is a bug. */
 968        BUG();
 969        return 0;
 970}
 971
 972static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 973{
 974        void __user *used;
 975        if (__put_user(vq->used_flags, &vq->used->flags) < 0)
 976                return -EFAULT;
 977        if (unlikely(vq->log_used)) {
 978                /* Make sure the flag is seen before log. */
 979                smp_wmb();
 980                /* Log used flag write. */
 981                used = &vq->used->flags;
 982                log_write(vq->log_base, vq->log_addr +
 983                          (used - (void __user *)vq->used),
 984                          sizeof vq->used->flags);
 985                if (vq->log_ctx)
 986                        eventfd_signal(vq->log_ctx, 1);
 987        }
 988        return 0;
 989}
 990
 991static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
 992{
 993        if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
 994                return -EFAULT;
 995        if (unlikely(vq->log_used)) {
 996                void __user *used;
 997                /* Make sure the event is seen before log. */
 998                smp_wmb();
 999                /* Log avail event write */
1000                used = vhost_avail_event(vq);
1001                log_write(vq->log_base, vq->log_addr +
1002                          (used - (void __user *)vq->used),
1003                          sizeof *vhost_avail_event(vq));
1004                if (vq->log_ctx)
1005                        eventfd_signal(vq->log_ctx, 1);
1006        }
1007        return 0;
1008}
1009
1010int vhost_init_used(struct vhost_virtqueue *vq)
1011{
1012        int r;
1013        if (!vq->private_data)
1014                return 0;
1015
1016        r = vhost_update_used_flags(vq);
1017        if (r)
1018                return r;
1019        vq->signalled_used_valid = false;
1020        return get_user(vq->last_used_idx, &vq->used->idx);
1021}
1022
1023static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
1024                          struct iovec iov[], int iov_size)
1025{
1026        const struct vhost_memory_region *reg;
1027        struct vhost_memory *mem;
1028        struct iovec *_iov;
1029        u64 s = 0;
1030        int ret = 0;
1031
1032        rcu_read_lock();
1033
1034        mem = rcu_dereference(dev->memory);
1035        while ((u64)len > s) {
1036                u64 size;
1037                if (unlikely(ret >= iov_size)) {
1038                        ret = -ENOBUFS;
1039                        break;
1040                }
1041                reg = find_region(mem, addr, len);
1042                if (unlikely(!reg)) {
1043                        ret = -EFAULT;
1044                        break;
1045                }
1046                _iov = iov + ret;
1047                size = reg->memory_size - addr + reg->guest_phys_addr;
1048                _iov->iov_len = min((u64)len - s, size);
1049                _iov->iov_base = (void __user *)(unsigned long)
1050                        (reg->userspace_addr + addr - reg->guest_phys_addr);
1051                s += size;
1052                addr += size;
1053                ++ret;
1054        }
1055
1056        rcu_read_unlock();
1057        return ret;
1058}
1059
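/* translate_desc() above turns a guest-physical range into iovecs pointing
 * at the owner's userspace mapping, splitting at region boundaries: for each
 * chunk it finds the region containing 'addr' and emits an iovec whose base
 * is userspace_addr + (addr - guest_phys_addr).  It returns the number of
 * iovecs filled, -ENOBUFS if iov[] is too small, or -EFAULT if some part of
 * the range is not covered by the memory table. */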
1060/* Each buffer in the virtqueues is actually a chain of descriptors.  This
1061 * function returns the next descriptor in the chain,
1062 * or -1U if we're at the end. */
1063static unsigned next_desc(struct vring_desc *desc)
1064{
1065        unsigned int next;
1066
1067        /* If this descriptor says it doesn't chain, we're done. */
1068        if (!(desc->flags & VRING_DESC_F_NEXT))
1069                return -1U;
1070
1071        /* Check they're not leading us off end of descriptors. */
1072        next = desc->next;
1073        /* Make sure compiler knows to grab that: we don't want it changing! */
1074        /* We will use the result as an index in an array, so most
1075         * architectures only need a compiler barrier here. */
1076        read_barrier_depends();
1077
1078        return next;
1079}
1080
1081static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1082                        struct iovec iov[], unsigned int iov_size,
1083                        unsigned int *out_num, unsigned int *in_num,
1084                        struct vhost_log *log, unsigned int *log_num,
1085                        struct vring_desc *indirect)
1086{
1087        struct vring_desc desc;
1088        unsigned int i = 0, count, found = 0;
1089        int ret;
1090
1091        /* Sanity check */
1092        if (unlikely(indirect->len % sizeof desc)) {
1093                vq_err(vq, "Invalid length in indirect descriptor: "
1094                       "len 0x%llx not multiple of 0x%zx\n",
1095                       (unsigned long long)indirect->len,
1096                       sizeof desc);
1097                return -EINVAL;
1098        }
1099
1100        ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
1101                             UIO_MAXIOV);
1102        if (unlikely(ret < 0)) {
1103                vq_err(vq, "Translation failure %d in indirect.\n", ret);
1104                return ret;
1105        }
1106
1107        /* We will use the result as an address to read from, so most
1108         * architectures only need a compiler barrier here. */
1109        read_barrier_depends();
1110
1111        count = indirect->len / sizeof desc;
1112        /* Buffers are chained via a 16 bit next field, so
1113         * we can have at most 2^16 of these. */
1114        if (unlikely(count > USHRT_MAX + 1)) {
1115                vq_err(vq, "Indirect buffer length too big: %d\n",
1116                       indirect->len);
1117                return -E2BIG;
1118        }
1119
1120        do {
1121                unsigned iov_count = *in_num + *out_num;
1122                if (unlikely(++found > count)) {
1123                        vq_err(vq, "Loop detected: last one at %u "
1124                               "indirect size %u\n",
1125                               i, count);
1126                        return -EINVAL;
1127                }
1128                if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1129                                              vq->indirect, sizeof desc))) {
1130                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1131                               i, (size_t)indirect->addr + i * sizeof desc);
1132                        return -EINVAL;
1133                }
1134                if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
1135                        vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1136                               i, (size_t)indirect->addr + i * sizeof desc);
1137                        return -EINVAL;
1138                }
1139
1140                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1141                                     iov_size - iov_count);
1142                if (unlikely(ret < 0)) {
1143                        vq_err(vq, "Translation failure %d indirect idx %d\n",
1144                               ret, i);
1145                        return ret;
1146                }
1147                /* If this is an input descriptor, increment that count. */
1148                if (desc.flags & VRING_DESC_F_WRITE) {
1149                        *in_num += ret;
1150                        if (unlikely(log)) {
1151                                log[*log_num].addr = desc.addr;
1152                                log[*log_num].len = desc.len;
1153                                ++*log_num;
1154                        }
1155                } else {
1156                        /* If it's an output descriptor, they're all supposed
1157                         * to come before any input descriptors. */
1158                        if (unlikely(*in_num)) {
1159                                vq_err(vq, "Indirect descriptor "
1160                                       "has out after in: idx %d\n", i);
1161                                return -EINVAL;
1162                        }
1163                        *out_num += ret;
1164                }
1165        } while ((i = next_desc(&desc)) != -1);
1166        return 0;
1167}
1168
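/* An indirect descriptor (VRING_DESC_F_INDIRECT) points at a separate table
 * of descriptors in guest memory, letting one ring slot describe a long
 * scatter-gather list.  get_indirect() translates that table into
 * vq->indirect iovecs and then walks it exactly like the main ring: output
 * descriptors first, then input (write) descriptors, with loop detection. */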
 1169/* This looks in the virtqueue for the first available buffer, and converts
1170 * it to an iovec for convenient access.  Since descriptors consist of some
1171 * number of output then some number of input descriptors, it's actually two
1172 * iovecs, but we pack them into one and note how many of each there were.
1173 *
1174 * This function returns the descriptor number found, or vq->num (which is
1175 * never a valid descriptor number) if none was found.  A negative code is
1176 * returned on error. */
1177int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1178                      struct iovec iov[], unsigned int iov_size,
1179                      unsigned int *out_num, unsigned int *in_num,
1180                      struct vhost_log *log, unsigned int *log_num)
1181{
1182        struct vring_desc desc;
1183        unsigned int i, head, found = 0;
1184        u16 last_avail_idx;
1185        int ret;
1186
1187        /* Check it isn't doing very strange things with descriptor numbers. */
1188        last_avail_idx = vq->last_avail_idx;
1189        if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
1190                vq_err(vq, "Failed to access avail idx at %p\n",
1191                       &vq->avail->idx);
1192                return -EFAULT;
1193        }
1194
1195        if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1196                vq_err(vq, "Guest moved used index from %u to %u",
1197                       last_avail_idx, vq->avail_idx);
1198                return -EFAULT;
1199        }
1200
1201        /* If there's nothing new since last we looked, return invalid. */
1202        if (vq->avail_idx == last_avail_idx)
1203                return vq->num;
1204
1205        /* Only get avail ring entries after they have been exposed by guest. */
1206        smp_rmb();
1207
1208        /* Grab the next descriptor number they're advertising, and increment
1209         * the index we've seen. */
1210        if (unlikely(__get_user(head,
1211                                &vq->avail->ring[last_avail_idx % vq->num]))) {
1212                vq_err(vq, "Failed to read head: idx %d address %p\n",
1213                       last_avail_idx,
1214                       &vq->avail->ring[last_avail_idx % vq->num]);
1215                return -EFAULT;
1216        }
1217
1218        /* If their number is silly, that's an error. */
1219        if (unlikely(head >= vq->num)) {
1220                vq_err(vq, "Guest says index %u > %u is available",
1221                       head, vq->num);
1222                return -EINVAL;
1223        }
1224
 1225        /* When we start there are neither input nor output descriptors. */
1226        *out_num = *in_num = 0;
1227        if (unlikely(log))
1228                *log_num = 0;
1229
1230        i = head;
1231        do {
1232                unsigned iov_count = *in_num + *out_num;
1233                if (unlikely(i >= vq->num)) {
1234                        vq_err(vq, "Desc index is %u > %u, head = %u",
1235                               i, vq->num, head);
1236                        return -EINVAL;
1237                }
1238                if (unlikely(++found > vq->num)) {
1239                        vq_err(vq, "Loop detected: last one at %u "
1240                               "vq size %u head %u\n",
1241                               i, vq->num, head);
1242                        return -EINVAL;
1243                }
1244                ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
1245                if (unlikely(ret)) {
1246                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1247                               i, vq->desc + i);
1248                        return -EFAULT;
1249                }
1250                if (desc.flags & VRING_DESC_F_INDIRECT) {
1251                        ret = get_indirect(dev, vq, iov, iov_size,
1252                                           out_num, in_num,
1253                                           log, log_num, &desc);
1254                        if (unlikely(ret < 0)) {
1255                                vq_err(vq, "Failure detected "
1256                                       "in indirect descriptor at idx %d\n", i);
1257                                return ret;
1258                        }
1259                        continue;
1260                }
1261
1262                ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1263                                     iov_size - iov_count);
1264                if (unlikely(ret < 0)) {
1265                        vq_err(vq, "Translation failure %d descriptor idx %d\n",
1266                               ret, i);
1267                        return ret;
1268                }
1269                if (desc.flags & VRING_DESC_F_WRITE) {
1270                        /* If this is an input descriptor,
1271                         * increment that count. */
1272                        *in_num += ret;
1273                        if (unlikely(log)) {
1274                                log[*log_num].addr = desc.addr;
1275                                log[*log_num].len = desc.len;
1276                                ++*log_num;
1277                        }
1278                } else {
1279                        /* If it's an output descriptor, they're all supposed
1280                         * to come before any input descriptors. */
1281                        if (unlikely(*in_num)) {
1282                                vq_err(vq, "Descriptor has out after in: "
1283                                       "idx %d\n", i);
1284                                return -EINVAL;
1285                        }
1286                        *out_num += ret;
1287                }
1288        } while ((i = next_desc(&desc)) != -1);
1289
1290        /* On success, increment avail index. */
1291        vq->last_avail_idx++;
1292
 1293        /* Assume notifications from the guest are disabled at this point;
 1294         * if they aren't, we would need to update the avail_event index. */
1295        BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
1296        return head;
1297}
1298
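/* Typical backend loop around vhost_get_vq_desc() (a sketch, modeled on
 * drivers/vhost/net.c):
 *
 *	head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head < 0)
 *		break;				// bad ring state
 *	if (head == vq->num) {
 *		if (unlikely(vhost_enable_notify(dev, vq))) {
 *			vhost_disable_notify(dev, vq);
 *			continue;		// more buffers raced in
 *		}
 *		break;				// ring empty, wait for kick
 *	}
 *	// ... consume 'out' output and 'in' input iovecs ...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 */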
1299/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1300void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1301{
1302        vq->last_avail_idx -= n;
1303}
1304
1305/* After we've used one of their buffers, we tell them about it.  We'll then
1306 * want to notify the guest, using eventfd. */
1307int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1308{
1309        struct vring_used_elem __user *used;
1310
1311        /* The virtqueue contains a ring of used buffers.  Get a pointer to the
1312         * next entry in that used ring. */
1313        used = &vq->used->ring[vq->last_used_idx % vq->num];
1314        if (__put_user(head, &used->id)) {
1315                vq_err(vq, "Failed to write used id");
1316                return -EFAULT;
1317        }
1318        if (__put_user(len, &used->len)) {
1319                vq_err(vq, "Failed to write used len");
1320                return -EFAULT;
1321        }
1322        /* Make sure buffer is written before we update index. */
1323        smp_wmb();
1324        if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1325                vq_err(vq, "Failed to increment used idx");
1326                return -EFAULT;
1327        }
1328        if (unlikely(vq->log_used)) {
1329                /* Make sure data is seen before log. */
1330                smp_wmb();
1331                /* Log used ring entry write. */
1332                log_write(vq->log_base,
1333                          vq->log_addr +
1334                           ((void __user *)used - (void __user *)vq->used),
1335                          sizeof *used);
1336                /* Log used index update. */
1337                log_write(vq->log_base,
1338                          vq->log_addr + offsetof(struct vring_used, idx),
1339                          sizeof vq->used->idx);
1340                if (vq->log_ctx)
1341                        eventfd_signal(vq->log_ctx, 1);
1342        }
1343        vq->last_used_idx++;
1344        /* If the driver never bothers to signal in a very long while,
1345         * used index might wrap around. If that happens, invalidate
1346         * signalled_used index we stored. TODO: make sure driver
1347         * signals at least once in 2^16 and remove this. */
1348        if (unlikely(vq->last_used_idx == vq->signalled_used))
1349                vq->signalled_used_valid = false;
1350        return 0;
1351}
1352
1353static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1354                            struct vring_used_elem *heads,
1355                            unsigned count)
1356{
1357        struct vring_used_elem __user *used;
1358        u16 old, new;
1359        int start;
1360
1361        start = vq->last_used_idx % vq->num;
1362        used = vq->used->ring + start;
1363        if (__copy_to_user(used, heads, count * sizeof *used)) {
1364                vq_err(vq, "Failed to write used");
1365                return -EFAULT;
1366        }
1367        if (unlikely(vq->log_used)) {
1368                /* Make sure data is seen before log. */
1369                smp_wmb();
1370                /* Log used ring entry write. */
1371                log_write(vq->log_base,
1372                          vq->log_addr +
1373                           ((void __user *)used - (void __user *)vq->used),
1374                          count * sizeof *used);
1375        }
1376        old = vq->last_used_idx;
1377        new = (vq->last_used_idx += count);
1378        /* If the driver never bothers to signal in a very long while,
1379         * used index might wrap around. If that happens, invalidate
1380         * signalled_used index we stored. TODO: make sure driver
1381         * signals at least once in 2^16 and remove this. */
1382        if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1383                vq->signalled_used_valid = false;
1384        return 0;
1385}
1386
1387/* After we've used one of their buffers, we tell them about it.  We'll then
1388 * want to notify the guest, using eventfd. */
1389int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1390                     unsigned count)
1391{
1392        int start, n, r;
1393
1394        start = vq->last_used_idx % vq->num;
1395        n = vq->num - start;
1396        if (n < count) {
1397                r = __vhost_add_used_n(vq, heads, n);
1398                if (r < 0)
1399                        return r;
1400                heads += n;
1401                count -= n;
1402        }
1403        r = __vhost_add_used_n(vq, heads, count);
1404
1405        /* Make sure buffer is written before we update index. */
1406        smp_wmb();
1407        if (put_user(vq->last_used_idx, &vq->used->idx)) {
1408                vq_err(vq, "Failed to increment used idx");
1409                return -EFAULT;
1410        }
1411        if (unlikely(vq->log_used)) {
1412                /* Log used index update. */
1413                log_write(vq->log_base,
1414                          vq->log_addr + offsetof(struct vring_used, idx),
1415                          sizeof vq->used->idx);
1416                if (vq->log_ctx)
1417                        eventfd_signal(vq->log_ctx, 1);
1418        }
1419        return r;
1420}
1421
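/* vhost_add_used_n() splits a batch that would wrap past the end of the used
 * ring into two copies.  E.g. with num == 256, last_used_idx % num == 250 and
 * count == 10, the first __vhost_add_used_n() writes entries 250..255 and the
 * second writes entries 0..3, after which used->idx is advanced by 10. */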
1422static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1423{
1424        __u16 old, new, event;
1425        bool v;
1426        /* Flush out used index updates. This is paired
1427         * with the barrier that the Guest executes when enabling
1428         * interrupts. */
1429        smp_mb();
1430
1431        if (vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1432            unlikely(vq->avail_idx == vq->last_avail_idx))
1433                return true;
1434
1435        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1436                __u16 flags;
1437                if (__get_user(flags, &vq->avail->flags)) {
1438                        vq_err(vq, "Failed to get flags");
1439                        return true;
1440                }
1441                return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
1442        }
1443        old = vq->signalled_used;
1444        v = vq->signalled_used_valid;
1445        new = vq->signalled_used = vq->last_used_idx;
1446        vq->signalled_used_valid = true;
1447
1448        if (unlikely(!v))
1449                return true;
1450
1451        if (get_user(event, vhost_used_event(vq))) {
1452                vq_err(vq, "Failed to get used event idx");
1453                return true;
1454        }
1455        return vring_need_event(event, new, old);
1456}
1457
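/* With VIRTIO_RING_F_EVENT_IDX, vring_need_event(event, new, old) (defined in
 * the virtio ring headers) returns true when the used index has moved past
 * the guest's used_event value since our last signal, i.e.
 * (u16)(new - event - 1) < (u16)(new - old), so the guest is interrupted only
 * once it has actually asked to be. */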
1458/* This actually signals the guest, using eventfd. */
1459void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1460{
 1461        /* Signal the Guest to tell them we used something up. */
1462        if (vq->call_ctx && vhost_notify(dev, vq))
1463                eventfd_signal(vq->call_ctx, 1);
1464}
1465
1466/* And here's the combo meal deal.  Supersize me! */
1467void vhost_add_used_and_signal(struct vhost_dev *dev,
1468                               struct vhost_virtqueue *vq,
1469                               unsigned int head, int len)
1470{
1471        vhost_add_used(vq, head, len);
1472        vhost_signal(dev, vq);
1473}
1474
1475/* multi-buffer version of vhost_add_used_and_signal */
1476void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1477                                 struct vhost_virtqueue *vq,
1478                                 struct vring_used_elem *heads, unsigned count)
1479{
1480        vhost_add_used_n(vq, heads, count);
1481        vhost_signal(dev, vq);
1482}
1483
1484/* OK, now we need to know about added descriptors. */
1485bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1486{
1487        u16 avail_idx;
1488        int r;
1489
1490        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1491                return false;
1492        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1493        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1494                r = vhost_update_used_flags(vq);
1495                if (r) {
1496                        vq_err(vq, "Failed to enable notification at %p: %d\n",
1497                               &vq->used->flags, r);
1498                        return false;
1499                }
1500        } else {
1501                r = vhost_update_avail_event(vq, vq->avail_idx);
1502                if (r) {
1503                        vq_err(vq, "Failed to update avail event index at %p: %d\n",
1504                               vhost_avail_event(vq), r);
1505                        return false;
1506                }
1507        }
1508        /* They could have slipped one in as we were doing that: make
1509         * sure it's written, then check again. */
1510        smp_mb();
1511        r = __get_user(avail_idx, &vq->avail->idx);
1512        if (r) {
1513                vq_err(vq, "Failed to check avail idx at %p: %d\n",
1514                       &vq->avail->idx, r);
1515                return false;
1516        }
1517
1518        return avail_idx != vq->avail_idx;
1519}
1520
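/* Note the order in vhost_enable_notify(): we first tell the guest to start
 * kicking again (clear NO_NOTIFY or update the avail event index), then
 * re-read avail->idx.  Returning true means buffers were added while
 * notifications were off, so the caller must process them itself instead of
 * waiting for a kick that will never arrive for those buffers. */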
1521/* We don't need to be notified again. */
1522void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1523{
1524        int r;
1525
1526        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1527                return;
1528        vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1529        if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1530                r = vhost_update_used_flags(vq);
1531                if (r)
 1532                        vq_err(vq, "Failed to disable notification at %p: %d\n",
1533                               &vq->used->flags, r);
1534        }
1535}
1536