linux/drivers/vhost/net.c
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
                                       " 1 - Enable; 0 - Disable");
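
/* Note: with 0444 permissions this parameter is read-only at runtime, so
 * zerocopy TX can only be toggled at module load time, e.g. (illustrative):
 *
 *      modprobe vhost_net experimental_zcopytx=0
 */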

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, the used buffer len is not needed, so we repurpose it to
 * track buffer status internally; this applies to zerocopy TX only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN    3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN      2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS   1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN     0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
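
/* Taken together, these values form a small per-buffer state machine: a
 * slot starts at VHOST_DMA_CLEAR_LEN, moves to VHOST_DMA_IN_PROGRESS when a
 * zerocopy skb is submitted, and is set to VHOST_DMA_DONE_LEN or
 * VHOST_DMA_FAILED_LEN by the completion callback. Note that
 * VHOST_DMA_IS_DONE() matches both DONE and FAILED: either way the buffer
 * can be returned to the guest.
 */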

enum {
        VHOST_NET_FEATURES = VHOST_FEATURES |
                         (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
                         (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

enum {
        VHOST_NET_VQ_RX = 0,
        VHOST_NET_VQ_TX = 1,
        VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
        struct kref kref;
        wait_queue_head_t wait;
        struct vhost_virtqueue *vq;
};

struct vhost_net_virtqueue {
        struct vhost_virtqueue vq;
        /* hdr is used to store the virtio header.
         * Since each iovec has >= 1 byte length, we never need more than
         * header length entries to store the header. */
        struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
        size_t vhost_hlen;
        size_t sock_hlen;
        /* vhost zerocopy support fields below: */
        /* last used idx for outstanding DMA zerocopy buffers */
        int upend_idx;
        /* first used idx for DMA done zerocopy buffers */
        int done_idx;
        /* an array of userspace buffers info */
        struct ubuf_info *ubuf_info;
        /* Reference counting for outstanding ubufs.
         * Protected by vq mutex. Writers must also take device mutex. */
        struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
        struct vhost_dev dev;
        struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
        struct vhost_poll poll[VHOST_NET_VQ_MAX];
        /* Number of TX recently submitted.
         * Protected by tx vq lock. */
        unsigned tx_packets;
        /* Number of times zerocopy TX recently failed.
         * Protected by tx vq lock. */
        unsigned tx_zcopy_err;
        /* Flush in progress. Protected by tx vq lock. */
        bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void vhost_net_enable_zcopy(int vq)
{
        vhost_net_zcopy_mask |= 0x1 << vq;
}

static void vhost_net_zerocopy_done_signal(struct kref *kref)
{
        struct vhost_net_ubuf_ref *ubufs;

        ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
        wake_up(&ubufs->wait);
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
        struct vhost_net_ubuf_ref *ubufs;
        /* No zero copy backend? Nothing to count. */
        if (!zcopy)
                return NULL;
        ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
        if (!ubufs)
                return ERR_PTR(-ENOMEM);
        kref_init(&ubufs->kref);
        init_waitqueue_head(&ubufs->wait);
        ubufs->vq = vq;
        return ubufs;
}

static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
        kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
        kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
        wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
        vhost_net_ubuf_put_and_wait(ubufs);
        kfree(ubufs);
}
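
/* Reference lifecycle: kref_init() starts the count at 1; each in-flight
 * zerocopy skb takes an extra reference (kref_get() in handle_tx()) and
 * drops it from vhost_zerocopy_callback(). put_and_wait() releases the
 * initial reference and sleeps until the count drains to zero, i.e. until
 * no lower-device DMA can still touch guest memory; it deliberately does
 * not free the structure, since vhost_net_flush() reuses it. Callers that
 * are done with the ubufs use put_wait_and_free() instead.
 */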

static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
        int i;

        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                kfree(n->vqs[i].ubuf_info);
                n->vqs[i].ubuf_info = NULL;
        }
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
        bool zcopy;
        int i;

        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                zcopy = vhost_net_zcopy_mask & (0x1 << i);
                if (!zcopy)
                        continue;
                n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
                                              UIO_MAXIOV, GFP_KERNEL);
                if (!n->vqs[i].ubuf_info)
                        goto err;
        }
        return 0;

err:
        vhost_net_clear_ubuf_info(n);
        return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
        int i;

        vhost_net_clear_ubuf_info(n);

        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                n->vqs[i].done_idx = 0;
                n->vqs[i].upend_idx = 0;
                n->vqs[i].ubufs = NULL;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
        ++net->tx_packets;
        if (net->tx_packets < 1024)
                return;
        net->tx_packets = 0;
        net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
        ++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
        /* TX flush waits for outstanding DMAs to be done.
         * Don't start new DMAs.
         */
        return !net->tx_flush &&
                net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
        return unlikely(experimental_zcopytx) &&
                sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
                          size_t len, int iov_count)
{
        int seg = 0;
        size_t size;

        while (len && seg < iov_count) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                from->iov_len -= size;
                from->iov_base += size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}
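
/* Example: with from = {{p, 4}, {q, 20}} and len = 10, move_iovec_hdr()
 * fills to = {{p, 4}, {q, 6}}, leaves from = {{p + 4, 0}, {q + 6, 14}} and
 * returns 2. copy_iovec_hdr() below builds the same 'to' but leaves 'from'
 * untouched, which is what the RX path needs since recvmsg() may modify
 * msg_iov.
 */
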
/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
                           size_t len, int iovcount)
{
        int seg = 0;
        size_t size;

        while (len && seg < iovcount) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
}

/* The lower device may complete DMAs out of order. upend_idx tracks the end
 * of the outstanding range and done_idx tracks its head. Once the lower
 * device has completed a contiguous run of DMAs, we signal the used idx to
 * the KVM guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
                                      struct vhost_virtqueue *vq)
{
        struct vhost_net_virtqueue *nvq =
                container_of(vq, struct vhost_net_virtqueue, vq);
        int i;
        int j = 0;

        for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
                if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
                        vhost_net_tx_err(net);
                if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
                        vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
                        vhost_add_used_and_signal(vq->dev, vq,
                                                  vq->heads[i].id, 0);
                        ++j;
                } else
                        break;
        }
        if (j)
                nvq->done_idx = i;
        return j;
}
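
/* Example: with done_idx = 5, upend_idx = 8 and heads[5..7].len set to
 * DONE, FAILED and IN_PROGRESS respectively, the loop signals heads 5 and 6
 * to the guest (6 also counts as a tx error), stops at 7 and leaves
 * done_idx = 7.
 */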

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
        struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
        struct vhost_virtqueue *vq = ubufs->vq;
        int cnt = atomic_read(&ubufs->kref.refcount);

        /*
         * Trigger the polling thread if the guest stopped submitting new
         * buffers: in that case the refcount after decrement will
         * eventually reach 1, so here it is 2.
         * We also trigger polling periodically after each 16 packets
         * (the value 16 here is more or less arbitrary, it's tuned to
         * trigger less than 10% of times).
         */
        if (cnt <= 2 || !(cnt % 16))
                vhost_poll_queue(&vq->poll);
        /* Set len to mark this descriptor's buffers as DMA done. */
        vq->heads[ubuf->desc].len = success ?
                VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
        vhost_net_ubuf_put(ubufs);
}

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
        struct vhost_virtqueue *vq = &nvq->vq;
        unsigned out, in, s;
        int head;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL,
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        size_t len, total_len = 0;
        int err;
        size_t hdr_size;
        struct socket *sock;
        struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
        bool zcopy, zcopy_used;

        /* TODO: check that we are running from vhost_worker? */
        sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&net->dev, vq);

        hdr_size = nvq->vhost_hlen;
        zcopy = nvq->ubufs;

        for (;;) {
                /* Release buffers with completed DMA first. */
                if (zcopy)
                        vhost_zerocopy_signal_used(net, vq);

                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        int num_pends;

                        /* If more outstanding DMAs, queue the work.
                         * Handle upend_idx wrap around
                         */
                        num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
                                    (nvq->upend_idx - nvq->done_idx) :
                                    (nvq->upend_idx + UIO_MAXIOV -
                                     nvq->done_idx);
                        if (unlikely(num_pends > VHOST_MAX_PEND))
                                break;
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO. */
                s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
                msg.msg_iovlen = out;
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               iov_length(nvq->hdr, s), hdr_size);
                        break;
                }
                zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
                                       nvq->upend_idx != nvq->done_idx);

                /* use msg_control to pass vhost zerocopy ubuf info to skb */
                if (zcopy_used) {
                        vq->heads[nvq->upend_idx].id = head;
                        if (!vhost_net_tx_select_zcopy(net) ||
                            len < VHOST_GOODCOPY_LEN) {
                                /* A copy doesn't need to wait for DMA done. */
                                vq->heads[nvq->upend_idx].len =
                                                        VHOST_DMA_DONE_LEN;
                                msg.msg_control = NULL;
                                msg.msg_controllen = 0;
                                ubufs = NULL;
                        } else {
                                struct ubuf_info *ubuf;
                                ubuf = nvq->ubuf_info + nvq->upend_idx;

                                vq->heads[nvq->upend_idx].len =
                                        VHOST_DMA_IN_PROGRESS;
                                ubuf->callback = vhost_zerocopy_callback;
                                ubuf->ctx = nvq->ubufs;
                                ubuf->desc = nvq->upend_idx;
                                msg.msg_control = ubuf;
                                msg.msg_controllen = sizeof(ubuf);
                                ubufs = nvq->ubufs;
                                kref_get(&ubufs->kref);
                        }
                        nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
                } else
                        msg.msg_control = NULL;
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
                                if (ubufs)
                                        vhost_net_ubuf_put(ubufs);
                                nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                                        % UIO_MAXIOV;
                        }
                        vhost_discard_vq_desc(vq, 1);
                        break;
                }
                if (err != len)
                        pr_debug("Truncated TX packet: "
                                 " len %d != %zd\n", err, len);
                if (!zcopy_used)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                else
                        vhost_zerocopy_signal_used(net, vq);
                total_len += len;
                vhost_net_tx_packet(net);
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
        struct sk_buff *head;
        int len = 0;
        unsigned long flags;

        spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
        head = skb_peek(&sk->sk_receive_queue);
        if (likely(head)) {
                len = head->len;
                if (vlan_tx_tag_present(head))
                        len += VLAN_HLEN;
        }

        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
        return len;
}
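
/* Note: when the peeked skb carries an offloaded VLAN tag, the tap device
 * will reinsert the 4-byte tag on its way to the guest, so the length is
 * padded by VLAN_HLEN to reserve enough guest buffer space.
 */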

/* This is a multi-buffer version of vhost_get_desc, that works if
 *      vq has read descriptors only.
 * @vq          - the relevant virtqueue
 * @datalen     - data length we'll be reading
 * @iovcount    - returned count of io vectors we fill
 * @log         - vhost log
 * @log_num     - log offset
 * @quota       - headcount quota, 1 for big buffer
 *      returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
                       struct vring_used_elem *heads,
                       int datalen,
                       unsigned *iovcount,
                       struct vhost_log *log,
                       unsigned *log_num,
                       unsigned int quota)
{
        unsigned int out, in;
        int seg = 0;
        int headcount = 0;
        unsigned d;
        int r, nlogs = 0;

        while (datalen > 0 && headcount < quota) {
                if (unlikely(seg >= UIO_MAXIOV)) {
                        r = -ENOBUFS;
                        goto err;
                }
                d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
                                      ARRAY_SIZE(vq->iov) - seg, &out,
                                      &in, log, log_num);
                if (d == vq->num) {
                        r = 0;
                        goto err;
                }
                if (unlikely(out || in <= 0)) {
                        vq_err(vq, "unexpected descriptor format for RX: "
                                "out %d, in %d\n", out, in);
                        r = -EINVAL;
                        goto err;
                }
                if (unlikely(log)) {
                        nlogs += *log_num;
                        log += *log_num;
                }
                heads[headcount].id = d;
                heads[headcount].len = iov_length(vq->iov + seg, in);
                datalen -= heads[headcount].len;
                ++headcount;
                seg += in;
        }
        heads[headcount - 1].len += datalen;
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
        return headcount;
err:
        vhost_discard_vq_desc(vq, headcount);
        return r;
}
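
/* Example: for datalen = 1500 and guest buffers of 800 bytes each, two
 * heads are consumed; the loop exits with datalen = -100, and the final
 * fixup trims the last head to 700 so the reported used lengths add up to
 * exactly 1500 bytes.
 */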

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
        struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
        struct vhost_virtqueue *vq = &nvq->vq;
        unsigned uninitialized_var(in), log;
        struct vhost_log *vq_log;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        struct virtio_net_hdr_mrg_rxbuf hdr = {
                .hdr.flags = 0,
                .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t total_len = 0;
        int err, mergeable;
        s16 headcount;
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
        /* TODO: check that we are running from vhost_worker? */
        struct socket *sock = rcu_dereference_check(vq->private_data, 1);

        if (!sock)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&net->dev, vq);
        vhost_hlen = nvq->vhost_hlen;
        sock_hlen = nvq->sock_hlen;

        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;
        mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

        while ((sock_len = peek_head_len(sock->sk))) {
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads, vhost_len,
                                        &in, vq_log, &log,
                                        likely(mergeable) ? UIO_MAXIOV : 1);
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        break;
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        /* Nothing new?  Wait for eventfd to tell us
                         * they refilled. */
                        break;
                }
                /* We don't need to be notified again. */
                if (unlikely(vhost_hlen))
                        /* Skip header. TODO: support TSO. */
                        move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
                else
                        /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
                         * needed because recvmsg can modify msg_iov. */
                        copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
                msg.msg_iovlen = in;
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
                 * it's not supposed to do this usually, but might be hard
                 * to prevent. Discard data we got (if any) and keep going. */
                if (unlikely(err != sock_len)) {
                        pr_debug("Discarded rx packet: "
                                 " len %d, expected %zd\n", err, sock_len);
                        vhost_discard_vq_desc(vq, headcount);
                        continue;
                }
                if (unlikely(vhost_hlen) &&
                    memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
                                      vhost_hlen)) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                               vq->iov->iov_base);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (likely(mergeable) &&
                    memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
                }
                vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
                                            headcount);
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, vhost_len);
                total_len += vhost_len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
        struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
        int r, i;

        if (!n)
                return -ENOMEM;
        vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                kfree(n);
                return -ENOMEM;
        }

        dev = &n->dev;
        vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
        vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
        n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
        for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
                n->vqs[i].ubufs = NULL;
                n->vqs[i].ubuf_info = NULL;
                n->vqs[i].upend_idx = 0;
                n->vqs[i].done_idx = 0;
                n->vqs[i].vhost_hlen = 0;
                n->vqs[i].sock_hlen = 0;
        }
        r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
        if (r < 0) {
                kfree(n);
                kfree(vqs);
                return r;
        }

        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

        f->private_data = n;

        return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
                                 struct vhost_virtqueue *vq)
{
        struct vhost_net_virtqueue *nvq =
                container_of(vq, struct vhost_net_virtqueue, vq);
        struct vhost_poll *poll = n->poll + (nvq - n->vqs);
        if (!vq->private_data)
                return;
        vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
                                struct vhost_virtqueue *vq)
{
        struct vhost_net_virtqueue *nvq =
                container_of(vq, struct vhost_net_virtqueue, vq);
        struct vhost_poll *poll = n->poll + (nvq - n->vqs);
        struct socket *sock;

        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
                return 0;

        return vhost_poll_start(poll, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
{
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
        return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
                           struct socket **rx_sock)
{
        *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
        *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
        vhost_poll_flush(n->poll + index);
        vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
        vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
        vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
        if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                n->tx_flush = true;
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                /* Wait for all lower device DMAs done. */
                vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
                mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
                n->tx_flush = false;
                kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
                mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
        }
}
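
/* The tx_flush handshake above: setting tx_flush under the vq mutex makes
 * vhost_net_tx_select_zcopy() refuse new zerocopy DMAs, so put_and_wait()
 * can only see the reference count drain to zero. The kref is then
 * re-initialized to 1 for the next round of transmissions; this is why
 * put_and_wait() must not free the ubufs structure.
 */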

static int vhost_net_release(struct inode *inode, struct file *f)
{
        struct vhost_net *n = f->private_data;
        struct socket *tx_sock;
        struct socket *rx_sock;

        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev, false);
        vhost_net_vq_reset(n);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n->dev.vqs);
        kfree(n);
        return 0;
}

static struct socket *get_raw_socket(int fd)
{
        struct {
                struct sockaddr_ll sa;
                char  buf[MAX_ADDR_LEN];
        } uaddr;
        int uaddr_len = sizeof uaddr, r;
        struct socket *sock = sockfd_lookup(fd, &r);

        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        /* Parameter checking */
        if (sock->sk->sk_type != SOCK_RAW) {
                r = -ESOCKTNOSUPPORT;
                goto err;
        }

        r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
                               &uaddr_len, 0);
        if (r)
                goto err;

        if (uaddr.sa.sll_family != AF_PACKET) {
                r = -EPFNOSUPPORT;
                goto err;
        }
        return sock;
err:
        fput(sock->file);
        return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tun_get_socket(file);
        if (!IS_ERR(sock))
                return sock;
        sock = macvtap_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}

static struct socket *get_socket(int fd)
{
        struct socket *sock;

        /* special case to disable backend */
        if (fd == -1)
                return NULL;
        sock = get_raw_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        sock = get_tap_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
        struct socket *sock, *oldsock;
        struct vhost_virtqueue *vq;
        struct vhost_net_virtqueue *nvq;
        struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
        int r;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        if (index >= VHOST_NET_VQ_MAX) {
                r = -ENOBUFS;
                goto err;
        }
        vq = &n->vqs[index].vq;
        nvq = &n->vqs[index];
        mutex_lock(&vq->mutex);

        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(vq)) {
                r = -EFAULT;
                goto err_vq;
        }
        sock = get_socket(fd);
        if (IS_ERR(sock)) {
                r = PTR_ERR(sock);
                goto err_vq;
        }

        /* start polling new socket */
        oldsock = rcu_dereference_protected(vq->private_data,
                                            lockdep_is_held(&vq->mutex));
        if (sock != oldsock) {
                ubufs = vhost_net_ubuf_alloc(vq,
                                             sock && vhost_sock_zcopy(sock));
                if (IS_ERR(ubufs)) {
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }

                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
                r = vhost_init_used(vq);
                if (r)
                        goto err_used;
                r = vhost_net_enable_vq(n, vq);
                if (r)
                        goto err_used;

                oldubufs = nvq->ubufs;
                nvq->ubufs = ubufs;

                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
                n->tx_flush = false;
        }

        mutex_unlock(&vq->mutex);

        if (oldubufs) {
                vhost_net_ubuf_put_wait_and_free(oldubufs);
                mutex_lock(&vq->mutex);
                vhost_zerocopy_signal_used(n, vq);
                mutex_unlock(&vq->mutex);
        }

        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }

        mutex_unlock(&n->dev.mutex);
        return 0;

err_used:
        rcu_assign_pointer(vq->private_data, oldsock);
        vhost_net_enable_vq(n, vq);
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
        fput(sock->file);
err_vq:
        mutex_unlock(&vq->mutex);
err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
        struct socket *tx_sock = NULL;
        struct socket *rx_sock = NULL;
        long err;
        struct vhost_memory *memory;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        memory = vhost_dev_reset_owner_prepare();
        if (!memory) {
                err = -ENOMEM;
                goto done;
        }
        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_reset_owner(&n->dev, memory);
        vhost_net_vq_reset(n);
done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
        size_t vhost_hlen, sock_hlen, hdr_len;
        int i;

        hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
                        sizeof(struct virtio_net_hdr_mrg_rxbuf) :
                        sizeof(struct virtio_net_hdr);
        if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
                /* vhost provides vnet_hdr */
                vhost_hlen = hdr_len;
                sock_hlen = 0;
        } else {
                /* socket provides vnet_hdr */
                vhost_hlen = 0;
                sock_hlen = hdr_len;
        }
        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        n->dev.acked_features = features;
        smp_wmb();
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].vq.mutex);
                n->vqs[i].vhost_hlen = vhost_hlen;
                n->vqs[i].sock_hlen = sock_hlen;
                mutex_unlock(&n->vqs[i].vq.mutex);
        }
        vhost_net_flush(n);
        mutex_unlock(&n->dev.mutex);
        return 0;
}
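
/* Worked example: with VIRTIO_NET_F_MRG_RXBUF acked, hdr_len is
 * sizeof(struct virtio_net_hdr_mrg_rxbuf) (12 bytes, vs. 10 bytes without
 * it). If VHOST_NET_F_VIRTIO_NET_HDR is also acked, vhost itself supplies
 * the header (vhost_hlen = 12, sock_hlen = 0); otherwise the backend socket
 * is expected to prepend it (vhost_hlen = 0, sock_hlen = 12).
 */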

static long vhost_net_set_owner(struct vhost_net *n)
{
        int r;

        mutex_lock(&n->dev.mutex);
        if (vhost_dev_has_owner(&n->dev)) {
                r = -EBUSY;
                goto out;
        }
        r = vhost_net_set_ubuf_info(n);
        if (r)
                goto out;
        r = vhost_dev_set_owner(&n->dev);
        if (r)
                vhost_net_clear_ubuf_info(n);
        vhost_net_flush(n);
out:
        mutex_unlock(&n->dev.mutex);
        return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                            unsigned long arg)
{
        struct vhost_net *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        struct vhost_vring_file backend;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_NET_SET_BACKEND:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                return vhost_net_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_NET_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_NET_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_net_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        case VHOST_SET_OWNER:
                return vhost_net_set_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&n->dev, ioctl, argp);
                else
                        vhost_net_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}
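
/* A minimal userspace setup sketch (illustrative only; error handling and
 * the VHOST_SET_MEM_TABLE/VHOST_SET_VRING_* calls are omitted, and 'tap_fd'
 * is assumed to be an already configured tap or macvtap fd):
 *
 *      int vhost_fd = open("/dev/vhost-net", O_RDWR);
 *      uint64_t features;
 *      struct vhost_vring_file backend = { .index = VHOST_NET_VQ_RX,
 *                                          .fd = tap_fd };
 *
 *      ioctl(vhost_fd, VHOST_SET_OWNER, NULL);
 *      ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
 *      ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
 *      ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
 */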

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
                                   unsigned long arg)
{
        return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_net_release,
        .unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_net_compat_ioctl,
#endif
        .open           = vhost_net_open,
        .llseek         = noop_llseek,
};

static struct miscdevice vhost_net_misc = {
        .minor = VHOST_NET_MINOR,
        .name = "vhost-net",
        .fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
        if (experimental_zcopytx)
                vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
        return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
        misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");