linux/drivers/virtio/virtio_ring.c
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(vq, fmt...)                    \
        do { dev_err(&vq->vq.vdev->dev, fmt); BUG(); } while(0)
#define START_USE(vq) \
        do { if ((vq)->in_use) panic("in_use = %i\n", (vq)->in_use); (vq)->in_use = __LINE__; mb(); } while(0)
#define END_USE(vq) \
        do { BUG_ON(!(vq)->in_use); (vq)->in_use = 0; mb(); } while(0)
#else
#define BAD_RING(vq, fmt...)                    \
        do { dev_err(&vq->vq.vdev->dev, fmt); (vq)->broken = true; } while(0)
#define START_USE(vq)
#define END_USE(vq)
#endif

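/* Reminder of the ring memory this code manipulates (summarised, not
 * verbatim, from the layout that vring_init() in <linux/virtio_ring.h>
 * imposes on the pages handed to vring_new_virtqueue(); num is assumed to
 * be a power of 2):
 *
 *	struct vring_desc desc[num];		// guest-filled descriptors
 *	__u16 avail_flags, avail_idx;
 *	__u16 available[num];			// heads offered to the host
 *	// ...padding up to the next page boundary...
 *	__u16 used_flags, used_idx;
 *	struct vring_used_elem used[num];	// heads the host has consumed
 */
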
struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Number of free buffers */
        unsigned int num_free;
        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static int vring_add_buf(struct virtqueue *_vq,
                         struct scatterlist sg[],
                         unsigned int out,
                         unsigned int in,
                         void *data)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i, avail, head, uninitialized_var(prev);

        BUG_ON(data == NULL);
        BUG_ON(out + in > vq->vring.num);
        BUG_ON(out + in == 0);

        START_USE(vq);

        if (vq->num_free < out + in) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         out + in, vq->num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->num_free -= out + in;

        head = vq->free_head;
        for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        for (; in; i = vq->vring.desc[i].next, in--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        /* Last one doesn't continue. */
        vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        vq->free_head = i;

        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync).  FIXME: avoid modulus here? */
        avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
        vq->vring.avail->ring[avail] = head;

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);
        return 0;
}
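/* Illustrative sketch (not part of this file): a driver normally reaches
 * vring_add_buf() through the ops table on its virtqueue and kicks once the
 * buffer is queued.  The buffer, length and token names below are made up.
 *
 *	struct scatterlist sg[1];
 *
 *	sg_init_one(sg, req_buf, req_len);
 *	if (vq->vq_ops->add_buf(vq, sg, 1, 0, token) == 0)	// 1 out, 0 in
 *		vq->vq_ops->kick(vq);
 *	// a non-zero return (-ENOSPC) means the ring is full; retry later
 */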

static void vring_kick(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        START_USE(vq);
        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        wmb();

        vq->vring.avail->idx += vq->num_added;
        vq->num_added = 0;

        /* Need to update avail index before checking if we should notify */
        mb();

        if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
                /* Prod other side to tell it about changes. */
                vq->notify(&vq->vq);

        END_USE(vq);
}
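/* Illustrative sketch: avail->idx is only published in vring_kick(), so a
 * caller can queue several buffers and notify the host once.  The loop below
 * is a hypothetical example (error handling omitted), not an API requirement.
 *
 *	for (i = 0; i < nbufs; i++)
 *		vq->vq_ops->add_buf(vq, sgs[i], 1, 0, tokens[i]);
 *	vq->vq_ops->kick(vq);		// one notification for the whole batch
 */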

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;
        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}

static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
        *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        END_USE(vq);
        return ret;
}
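/* Illustrative sketch: a driver's virtqueue callback typically drains all
 * completed buffers with get_buf(), receiving back the token it passed to
 * add_buf().  The callback and completion helper names are hypothetical.
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = vq->vq_ops->get_buf(vq, &len)) != NULL)
 *			complete_request(token, len);	// hypothetical helper
 *	}
 */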

static void vring_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

static bool vring_enable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        mb();
        if (unlikely(more_used(vq))) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
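/* Illustrative sketch: the disable_cb()/enable_cb() pair supports a
 * poll-then-reenable pattern (as used by NAPI-style drivers).  enable_cb()
 * returning false means more work arrived while callbacks were suppressed,
 * so the caller should keep polling.  The helper names are hypothetical.
 *
 *	vq->vq_ops->disable_cb(vq);
 *	drain_queue(vq);			// hypothetical
 *	if (!vq->vq_ops->enable_cb(vq)) {
 *		vq->vq_ops->disable_cb(vq);
 *		continue_polling(vq);		// hypothetical
 *	}
 */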

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
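/* Illustrative sketch: a transport delivers its interrupt to the ring by
 * calling vring_interrupt() with the virtqueue pointer as the opaque
 * argument, for example from a handler that knows which queue fired.  The
 * device structure and field names are hypothetical.
 *
 *	static irqreturn_t my_transport_interrupt(int irq, void *opaque)
 *	{
 *		struct my_transport_device *dev = opaque;
 *
 *		return vring_interrupt(irq, dev->vq);
 *	}
 */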

static struct virtqueue_ops vring_vq_ops = {
        .add_buf = vring_add_buf,
        .get_buf = vring_get_buf,
        .kick = vring_kick,
        .disable_cb = vring_disable_cb,
        .enable_cb = vring_enable_cb,
};

struct virtqueue *vring_new_virtqueue(unsigned int num,
                                      struct virtio_device *vdev,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *))
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, PAGE_SIZE);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.vq_ops = &vring_vq_ops;
        vq->notify = notify;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
#ifdef DEBUG
        vq->in_use = false;
#endif

        /* No callback?  Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->num_free = num;
        vq->free_head = 0;
        for (i = 0; i < num-1; i++)
                vq->vring.desc[i].next = i+1;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
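/* Illustrative sketch: a transport allocates physically contiguous,
 * page-aligned memory for the ring and hands it in here; it must also tell
 * the host where that memory lives (transport specific, omitted).  The
 * ring_order, my_notify and my_callback names are hypothetical.
 *
 *	void *pages = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *					       ring_order);
 *	struct virtqueue *vq;
 *
 *	vq = vring_new_virtqueue(num, vdev, pages, my_notify, my_callback);
 *	if (!vq)
 *		free_pages((unsigned long)pages, ring_order);
 */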

void vring_del_virtqueue(struct virtqueue *vq)
{
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);
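/* Illustrative sketch: a transport would typically call this from its
 * finalize_features hook so that transport feature bits it does not handle
 * are cleared before features are written back to the host.  The hook name
 * is hypothetical; the write-back itself is transport specific.
 *
 *	static void my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		// ...then propagate vdev->features to the device...
 *	}
 */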

MODULE_LICENSE("GPL");