linux/drivers/vhost/test.c
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.c"
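/* The vhost core is #included directly rather than linked in, presumably
 * so this test module builds self-contained without needing symbols
 * exported from vhost.c. */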

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
        VHOST_TEST_VQ = 0,
        VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

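/* The test device is a pure TX sink: handle_vq() pops "out" descriptors,
 * counts their length and completes them immediately with a used length
 * of 0; device-writable ("in") descriptors are treated as errors. */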
/* Expects to always be run from workqueue - which acts as a
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
        struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
        unsigned out, in;
        int head;
        size_t len, total_len = 0;
        void *private;

        private = rcu_dereference_check(vq->private_data, 1);
        if (!private)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&n->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&n->dev, vq))) {
                                vhost_disable_notify(&n->dev, vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected 0 len for TX\n");
                        break;
                }
                vhost_add_used_and_signal(&n->dev, vq, head, 0);
                total_len += len;
                if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

        handle_vq(n);
}

static int vhost_test_open(struct inode *inode, struct file *f)
{
        struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
        struct vhost_dev *dev;
        int r;

        if (!n)
                return -ENOMEM;

        dev = &n->dev;
        n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
        r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
        if (r < 0) {
                kfree(n);
                return r;
        }

        f->private_data = n;

        return 0;
}

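/* vq->private_data doubles as the "enabled" flag: the worker reads it
 * under RCU in handle_vq(), while writers update it under vq->mutex -
 * hence rcu_dereference_protected() with lockdep_is_held() below. */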
static void *vhost_test_stop_vq(struct vhost_test *n,
                                struct vhost_virtqueue *vq)
{
        void *private;

        mutex_lock(&vq->mutex);
        private = rcu_dereference_protected(vq->private_data,
                                            lockdep_is_held(&vq->mutex));
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
        return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
        *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
        vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
        vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
        struct vhost_test *n = f->private_data;
        void *private;

        vhost_test_stop(n, &private);
        vhost_test_flush(n);
        vhost_dev_cleanup(&n->dev, false);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_test_flush(n);
        kfree(n);
        return 0;
}

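/* Start (test == 1) or stop (test == 0) the test vq by publishing the
 * device pointer as private_data, or clearing it.  After swapping the
 * pointer out, the vq is flushed so no worker still sees the old value. */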
static long vhost_test_run(struct vhost_test *n, int test)
{
        void *priv, *oldpriv;
        struct vhost_virtqueue *vq;
        int r, index;

        if (test < 0 || test > 1)
                return -EINVAL;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        for (index = 0; index < n->dev.nvqs; ++index) {
                /* Verify that ring has been setup correctly. */
                if (!vhost_vq_access_ok(&n->vqs[index])) {
                        r = -EFAULT;
                        goto err;
                }
        }

        for (index = 0; index < n->dev.nvqs; ++index) {
                vq = n->vqs + index;
                mutex_lock(&vq->mutex);
                priv = test ? n : NULL;

                /* Publish the new private_data to start/stop the worker. */
                oldpriv = rcu_dereference_protected(vq->private_data,
                                                    lockdep_is_held(&vq->mutex));
                rcu_assign_pointer(vq->private_data, priv);

                r = vhost_init_used(&n->vqs[index]);

                mutex_unlock(&vq->mutex);

                if (r)
                        goto err;

                if (oldpriv) {
                        vhost_test_flush_vq(n, index);
                }
        }

        mutex_unlock(&n->dev.mutex);
        return 0;

err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
        void *priv = NULL;
        long err;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        vhost_test_stop(n, &priv);
        vhost_test_flush(n);
        err = vhost_dev_reset_owner(&n->dev);
done:
        mutex_unlock(&n->dev.mutex);
        return err;
}

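/* Setting VHOST_F_LOG_ALL requires a valid log buffer.  The smp_wmb()
 * orders the store to acked_features before the flush below lets queued
 * work run again with the new value. */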
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        n->dev.acked_features = features;
        smp_wmb();
        vhost_test_flush(n);
        mutex_unlock(&n->dev.mutex);
        return 0;
}

static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
{
        struct vhost_test *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        int test;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_TEST_RUN:
                if (copy_from_user(&test, argp, sizeof test))
                        return -EFAULT;
                return vhost_test_run(n, test);
        case VHOST_GET_FEATURES:
                features = VHOST_NET_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_NET_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_test_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_test_reset_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, arg);
                vhost_test_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}

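/* A minimal userspace sketch (hypothetical, not part of this file) of
 * driving the device: assumes the VHOST_TEST_RUN ioctl from test.h and
 * a /dev/vhost-test node; memory-table and vring setup are elided.
 *
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/vhost.h>
 *      #include "test.h"
 *
 *      int run_vhost_test(void)
 *      {
 *              int on = 1, off = 0;
 *              int fd = open("/dev/vhost-test", O_RDWR);
 *
 *              if (fd < 0)
 *                      return -1;
 *              if (ioctl(fd, VHOST_SET_OWNER, NULL))
 *                      return -1;
 *              // ... VHOST_SET_MEM_TABLE / VHOST_SET_VRING_* setup ...
 *              if (ioctl(fd, VHOST_TEST_RUN, &on))
 *                      return -1;
 *              return ioctl(fd, VHOST_TEST_RUN, &off);
 *      }
 */
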
#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
                                    unsigned long arg)
{
        return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_test_release,
        .unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_test_compat_ioctl,
#endif
        .open           = vhost_test_open,
        .llseek         = noop_llseek,
};

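/* A dynamic misc minor; udev will typically create /dev/vhost-test. */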
static struct miscdevice vhost_test_misc = {
        MISC_DYNAMIC_MINOR,
        "vhost-test",
        &vhost_test_fops,
};

static int vhost_test_init(void)
{
        return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
        misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");