linux/drivers/virtio/virtio_ring.c
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
        do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while (0)
#define virtio_rmb(vq) \
        do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while (0)
#define virtio_wmb(vq) \
        do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while (0)
#else
/* We must force memory ordering even if the guest is UP, since the host could
 * be running on another CPU, but SMP barriers are defined to barrier() in
 * that configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                BUG();                                          \
        } while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)                                          \
        do {                                                    \
                if ((_vq)->in_use)                              \
                        panic("%s:in_use = %i\n",               \
                              (_vq)->vq.name, (_vq)->in_use);   \
                (_vq)->in_use = __LINE__;                       \
        } while (0)
#define END_USE(_vq) \
        do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)                             \
        do {                                                    \
                dev_err(&(_vq)->vq.vdev->dev,                   \
                        "%s:"fmt, (_vq)->vq.name, ##args);      \
                (_vq)->broken = true;                           \
        } while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
        struct virtqueue vq;

        /* Actual memory layout for this queue */
        struct vring vring;

        /* Can we use weak barriers? */
        bool weak_barriers;

        /* Other side has made a mess, don't try any more. */
        bool broken;

        /* Host supports indirect buffers */
        bool indirect;

        /* Host publishes avail event idx */
        bool event;

        /* Head of free buffer list. */
        unsigned int free_head;
        /* Number we've added since last sync. */
        unsigned int num_added;

        /* Last used index we've seen. */
        u16 last_used_idx;

        /* How to notify other side. FIXME: commonalize hcalls! */
        void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
        /* They're supposed to lock for us. */
        unsigned int in_use;

        /* Figure out if their kicks are too delayed. */
        bool last_add_time_valid;
        ktime_t last_add_time;
#endif

        /* Tokens for callbacks. */
        void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
                              struct scatterlist sg[],
                              unsigned int out,
                              unsigned int in,
                              gfp_t gfp)
{
        struct vring_desc *desc;
        unsigned head;
        int i;

        /*
         * We require lowmem mappings for the descriptors because
         * otherwise virt_to_phys will give us bogus addresses in the
         * virtqueue.
         */
        gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

        desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
        if (!desc)
                return -ENOMEM;

        /* Transfer entries from the sg list into the indirect page */
        for (i = 0; i < out; i++) {
                desc[i].flags = VRING_DESC_F_NEXT;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }
        for (; i < (out + in); i++) {
                desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                desc[i].addr = sg_phys(sg);
                desc[i].len = sg->length;
                desc[i].next = i+1;
                sg++;
        }

        /* Last one doesn't continue. */
        desc[i-1].flags &= ~VRING_DESC_F_NEXT;
        desc[i-1].next = 0;

        /* We're about to use a buffer */
        vq->vq.num_free--;

        /* Use a single buffer which doesn't continue */
        head = vq->free_head;
        vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
        vq->vring.desc[head].addr = virt_to_phys(desc);
        vq->vring.desc[head].len = i * sizeof(struct vring_desc);

        /* Update free pointer */
        vq->free_head = vq->vring.desc[head].next;

        return head;
}

/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg readable by other side
 * @in: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM).
 */
int virtqueue_add_buf(struct virtqueue *_vq,
                      struct scatterlist sg[],
                      unsigned int out,
                      unsigned int in,
                      void *data,
                      gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i, avail, uninitialized_var(prev);
        int head;

        START_USE(vq);

        BUG_ON(data == NULL);

#ifdef DEBUG
        {
                ktime_t now = ktime_get();

                /* No kick or get, with 0.1 seconds between?  Warn. */
                if (vq->last_add_time_valid)
                        WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
                                            > 100);
                vq->last_add_time = now;
                vq->last_add_time_valid = true;
        }
#endif

        /* If the host supports indirect descriptor tables, and we have multiple
         * buffers, then go indirect. FIXME: tune this threshold */
        if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
                head = vring_add_indirect(vq, sg, out, in, gfp);
                if (likely(head >= 0))
                        goto add_head;
        }

        BUG_ON(out + in > vq->vring.num);
        BUG_ON(out + in == 0);

        if (vq->vq.num_free < out + in) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         out + in, vq->vq.num_free);
                /* FIXME: for historical reasons, we force a notify here if
                 * there are outgoing parts to the buffer.  Presumably the
                 * host should service the ring ASAP. */
                if (out)
                        vq->notify(&vq->vq);
                END_USE(vq);
                return -ENOSPC;
        }

        /* We're about to use some buffers from the free list. */
        vq->vq.num_free -= out + in;

        head = vq->free_head;
        for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        for (; in; i = vq->vring.desc[i].next, in--) {
                vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
                vq->vring.desc[i].addr = sg_phys(sg);
                vq->vring.desc[i].len = sg->length;
                prev = i;
                sg++;
        }
        /* Last one doesn't continue. */
        vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

        /* Update free pointer */
        vq->free_head = i;

add_head:
        /* Set token. */
        vq->data[head] = data;

        /* Put entry in available array (but don't update avail->idx until they
         * do sync). */
        avail = (vq->vring.avail->idx & (vq->vring.num-1));
        vq->vring.avail->ring[avail] = head;

        /* Descriptors and available array need to be set before we expose the
         * new available array entries. */
        virtio_wmb(vq);
        vq->vring.avail->idx++;
        vq->num_added++;

        /* This is very unlikely, but theoretically possible.  Kick
         * just in case. */
        if (unlikely(vq->num_added == (1 << 16) - 1))
                virtqueue_kick(_vq);

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        return 0;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
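
/*
 * Illustrative usage sketch (not part of this file): a driver typically
 * builds a scatterlist with the host-readable parts first and the
 * host-writable parts last, hands it to virtqueue_add_buf() together with
 * a token it can recognise later, and then kicks the other side.  The
 * "struct my_request" and its fields below are hypothetical.
 *
 *      struct scatterlist sg[2];
 *      int err;
 *
 *      sg_init_table(sg, 2);
 *      sg_set_buf(&sg[0], &req->hdr, sizeof(req->hdr));
 *      sg_set_buf(&sg[1], &req->status, sizeof(req->status));
 *      err = virtqueue_add_buf(vq, sg, 1, 1, req, GFP_ATOMIC);
 *      if (err < 0)
 *              return err;
 *      virtqueue_kick(vq);
 */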

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *      if (virtqueue_kick_prepare(vq))
 *              virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 new, old;
        bool needs_kick;

        START_USE(vq);
        /* We need to expose available array entries before checking avail
         * event. */
        virtio_mb(vq);

        old = vq->vring.avail->idx - vq->num_added;
        new = vq->vring.avail->idx;
        vq->num_added = 0;

#ifdef DEBUG
        if (vq->last_add_time_valid) {
                WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
                                              vq->last_add_time)) > 100);
        }
        vq->last_add_time_valid = false;
#endif

        if (vq->event) {
                needs_kick = vring_need_event(vring_avail_event(&vq->vring),
                                              new, old);
        } else {
                needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
        }
        END_USE(vq);
        return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
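
/*
 * For reference: with VIRTIO_RING_F_EVENT_IDX negotiated, the decision above
 * is made by vring_need_event() from <linux/virtio_ring.h>.  It checks, in
 * wrap-safe 16-bit arithmetic, whether the avail index crossed the event
 * index the other side asked to be woken at while moving from old to new,
 * i.e. (roughly):
 *
 *      (u16)(new - event_idx - 1) < (u16)(new - old)
 */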

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        /* Prod other side to tell it about changes. */
        vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
        unsigned int i;

        /* Clear data ptr. */
        vq->data[head] = NULL;

        /* Put back on free list: find end */
        i = head;

        /* Free the indirect table */
        if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
                kfree(phys_to_virt(vq->vring.desc[i].addr));

        while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
                i = vq->vring.desc[i].next;
                vq->vq.num_free++;
        }

        vq->vring.desc[i].next = vq->free_head;
        vq->free_head = head;
        /* Plus final descriptor */
        vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        void *ret;
        unsigned int i;
        u16 last_used;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used array entries after they have been exposed by host. */
        virtio_rmb(vq);

        last_used = (vq->last_used_idx & (vq->vring.num - 1));
        i = vq->vring.used->ring[last_used].id;
        *len = vq->vring.used->ring[last_used].len;

        if (unlikely(i >= vq->vring.num)) {
                BAD_RING(vq, "id %u out of range\n", i);
                return NULL;
        }
        if (unlikely(!vq->data[i])) {
                BAD_RING(vq, "id %u is not a head!\n", i);
                return NULL;
        }

        /* detach_buf clears data, so grab it now. */
        ret = vq->data[i];
        detach_buf(vq, i);
        vq->last_used_idx++;
        /* If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call. */
        if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
                vring_used_event(&vq->vring) = vq->last_used_idx;
                virtio_mb(vq);
        }

#ifdef DEBUG
        vq->last_add_time_valid = false;
#endif

        END_USE(vq);
        return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
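
/*
 * Illustrative sketch (not part of this file): a driver's virtqueue callback
 * usually drains all completed buffers in a loop, reclaiming each "data"
 * token in turn.  "struct my_request" and my_complete() are hypothetical.
 *
 *      static void my_vq_callback(struct virtqueue *vq)
 *      {
 *              struct my_request *req;
 *              unsigned int len;
 *
 *              while ((req = virtqueue_get_buf(vq, &len)) != NULL)
 *                      my_complete(req, len);
 *      }
 */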

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        vring_used_event(&vq->vring) = vq->last_used_idx;
        virtio_mb(vq);
        if (unlikely(more_used(vq))) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
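
/*
 * Illustrative sketch (not part of this file): the "false" return above lets
 * a driver close the race between draining the ring and re-enabling
 * interrupts.  A typical poll loop (hypothetical names) looks like:
 *
 *      struct my_request *req;
 *      unsigned int len;
 *
 *      do {
 *              virtqueue_disable_cb(vq);
 *              while ((req = virtqueue_get_buf(vq, &len)) != NULL)
 *                      my_complete(req, len);
 *      } while (!virtqueue_enable_cb(vq));
 */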

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 bufs;

        START_USE(vq);

        /* We optimistically turn back on interrupts, then check if there was
         * more to do. */
        /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
         * entry. Always do both to keep code simple. */
        vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
        /* TODO: tune this threshold */
        bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
        vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
        virtio_mb(vq);
        if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
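
/*
 * Worked example for the threshold above: if, say, 100 buffers are still
 * outstanding (avail->idx - last_used_idx == 100), bufs is 75, so the used
 * event index is set 75 entries ahead and the other side only interrupts us
 * again once roughly three quarters of those buffers have been consumed.
 */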

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->vring.num; i++) {
                if (!vq->data[i])
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->data[i];
                detach_buf(vq, i);
                vq->vring.avail->idx--;
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->vq.num_free != vq->vring.num);

        END_USE(vq);
        return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      void *pages,
                                      void (*notify)(struct virtqueue *),
                                      void (*callback)(struct virtqueue *),
                                      const char *name)
{
        struct vring_virtqueue *vq;
        unsigned int i;

        /* We assume num is a power of 2. */
        if (num & (num - 1)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return NULL;
        }

        vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
        if (!vq)
                return NULL;

        vring_init(&vq->vring, num, pages, vring_align);
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = num;
        vq->vq.index = index;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        /* No callback?  Tell other side not to bother us. */
        if (!callback)
                vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < num-1; i++) {
                vq->vring.desc[i].next = i+1;
                vq->data[i] = NULL;
        }
        vq->data[i] = NULL;

        return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
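
/*
 * Illustrative sketch (not part of this file): a transport layer (e.g. a PCI
 * or MMIO backend) allocates page-aligned, zeroed memory for the ring and
 * then creates the virtqueue roughly like this.  my_notify() is hypothetical
 * and error handling is abbreviated; VIRTIO_PCI_VRING_ALIGN comes from
 * <linux/virtio_pci.h>.
 *
 *      void *queue = alloc_pages_exact(vring_size(num, VIRTIO_PCI_VRING_ALIGN),
 *                                      GFP_KERNEL | __GFP_ZERO);
 *      struct virtqueue *vq;
 *
 *      vq = vring_new_virtqueue(index, num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *                               true, queue, my_notify, callback, name);
 *      if (!vq)
 *              free_pages_exact(queue, vring_size(num, VIRTIO_PCI_VRING_ALIGN));
 */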

void vring_del_virtqueue(struct virtqueue *vq)
{
        list_del(&vq->list);
        kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                default:
                        /* We don't understand this bit. */
                        clear_bit(i, vdev->features);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");