/* linux/drivers/net/tun.c */
   1/*
   2 *  TUN - Universal TUN/TAP device driver.
   3 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   4 *
   5 *  This program is free software; you can redistribute it and/or modify
   6 *  it under the terms of the GNU General Public License as published by
   7 *  the Free Software Foundation; either version 2 of the License, or
   8 *  (at your option) any later version.
   9 *
  10 *  This program is distributed in the hope that it will be useful,
  11 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13 *  GNU General Public License for more details.
  14 *
  15 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
  16 */
  17
  18/*
  19 *  Changes:
  20 *
  21 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  22 *    Add TUNSETLINK ioctl to set the link encapsulation
  23 *
  24 *  Mark Smith <markzzzsmith@yahoo.com.au>
  25 *    Use eth_random_addr() for tap MAC address.
  26 *
  27 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  28 *    Fixes in packet dropping, queue length setting and queue wakeup.
  29 *    Increased default tx queue length.
  30 *    Added ethtool API.
  31 *    Minor cleanups
  32 *
  33 *  Daniel Podlejski <underley@underley.eu.org>
  34 *    Modifications for 2.3.99-pre5 kernel.
  35 */
  36
  37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  38
  39#define DRV_NAME        "tun"
  40#define DRV_VERSION     "1.6"
  41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
  42#define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  43
  44#include <linux/module.h>
  45#include <linux/errno.h>
  46#include <linux/kernel.h>
  47#include <linux/major.h>
  48#include <linux/slab.h>
  49#include <linux/poll.h>
  50#include <linux/fcntl.h>
  51#include <linux/init.h>
  52#include <linux/skbuff.h>
  53#include <linux/netdevice.h>
  54#include <linux/etherdevice.h>
  55#include <linux/miscdevice.h>
  56#include <linux/ethtool.h>
  57#include <linux/rtnetlink.h>
  58#include <linux/compat.h>
  59#include <linux/if.h>
  60#include <linux/if_arp.h>
  61#include <linux/if_ether.h>
  62#include <linux/if_tun.h>
  63#include <linux/crc32.h>
  64#include <linux/nsproxy.h>
  65#include <linux/virtio_net.h>
  66#include <linux/rcupdate.h>
  67#include <net/net_namespace.h>
  68#include <net/netns/generic.h>
  69#include <net/rtnetlink.h>
  70#include <net/sock.h>
  71
  72#include <asm/uaccess.h>
  73
  74/* Uncomment to enable debugging */
  75/* #define TUN_DEBUG 1 */
  76
  77#ifdef TUN_DEBUG
  78static int debug;
  79
  80#define tun_debug(level, tun, fmt, args...)                     \
  81do {                                                            \
  82        if (tun->debug)                                         \
  83                netdev_printk(level, tun->dev, fmt, ##args);    \
  84} while (0)
  85#define DBG1(level, fmt, args...)                               \
  86do {                                                            \
  87        if (debug == 2)                                         \
  88                printk(level fmt, ##args);                      \
  89} while (0)
  90#else
  91#define tun_debug(level, tun, fmt, args...)                     \
  92do {                                                            \
  93        if (0)                                                  \
  94                netdev_printk(level, tun->dev, fmt, ##args);    \
  95} while (0)
  96#define DBG1(level, fmt, args...)                               \
  97do {                                                            \
  98        if (0)                                                  \
  99                printk(level fmt, ##args);                      \
 100} while (0)
 101#endif
 102
 103#define GOODCOPY_LEN 128
 104
#define FLT_EXACT_COUNT 8
/* TAP MAC filter: up to FLT_EXACT_COUNT exact-match addresses plus a
 * 64-bit hash mask for additional multicast addresses. */
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];  /* exact-match table */
};
 111
/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen to let the rx/tx queues allocated for
 * the netdevice fit in one page, so the memory allocation is sure to
 * succeed. TODO: increase the limit. */
 115#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
 116#define MAX_TAP_FLOWS  4096
 117
 118#define TUN_FLOW_EXPIRE (3 * HZ)
 119
/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering for the netdevice, not for a specific queue.
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;			/* readers sleep on wq.wait */
	struct tun_struct __rcu *tun;		/* attached device, NULL if none */
	struct net *net;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	u16 queue_index;			/* slot in tun->tfiles[] */
	struct list_head next;			/* link in tun->disabled */
	struct tun_struct *detached;		/* set while on the disabled list */
};
 144
/* One receive-flow table entry: remembers which queue a flow (keyed by
 * rxhash) was last seen on so tun_select_queue() can keep the flow on
 * that queue.  Freed via RCU; aged out by tun_flow_cleanup(). */
struct tun_flow_entry {
	struct hlist_node hash_link;	/* link in a tun->flows[] bucket */
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;			/* flow hash this entry matches */
	int queue_index;		/* queue the flow last used */
	unsigned long updated;		/* jiffies of last update, for aging */
};
 154
 155#define TUN_NUM_FLOW_ENTRIES 1024
 156
/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES]; /* active queues */
	unsigned int            numqueues;	/* # of active queues */
	unsigned int 		flags;		/* TUN_* mode/option flags */
	kuid_t			owner;		/* permitted owner, if valid */
	kgid_t			group;		/* permitted group, if valid */

	struct net_device	*dev;
	netdev_features_t	set_features;	/* user-selected offload subset */
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6|NETIF_F_UFO)

	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter       txflt;		/* TAP MAC filter */
	struct sock_fprog	fprog;		/* saved socket filter program */
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;			/* serializes flows[] writers */
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;	/* ages out idle flow entries */
	unsigned long ageing_time;
	unsigned int numdisabled;		/* length of the disabled list */
	struct list_head disabled;		/* detached-but-kept queues */
	void *security;
	u32 flow_count;				/* total entries in flows[] */
};
 191
 192static inline u32 tun_hashfn(u32 rxhash)
 193{
 194        return rxhash & 0x3ff;
 195}
 196
/* Look up the flow entry with @rxhash in one hash bucket.
 * Caller must hold rcu_read_lock() (or tun->lock for writers).
 * Returns the entry, or NULL if the bucket has no match.
 */
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(e, n, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
 208
/* Allocate a flow entry and publish it at the head of @head.
 * Caller must hold tun->lock.  Uses GFP_ATOMIC since it runs from the
 * packet path; returns NULL (and creates nothing) on allocation failure.
 */
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		/* Fully initialize before hlist_add_head_rcu() publishes it */
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}
 227
/* Unlink a flow entry and free it after an RCU grace period.
 * Caller must hold tun->lock; lockless readers may still walk the
 * entry until the grace period ends, hence kfree_rcu().
 */
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}
 236
/* Drop every entry in the flow table. */
static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *h, *n;

		/* _safe variant: tun_flow_delete() unlinks as we walk */
		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}
 251
/* Drop every flow entry whose queue_index equals @queue_index; used
 * when a queue slot disappears so stale entries cannot steer packets
 * to it.
 */
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *h, *n;

		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
 268
/* Timer callback: age out flow entries that were not updated within
 * ageing_time.  While any entries remain, re-arms the timer for the
 * earliest upcoming expiry.  @data is the tun_struct pointer passed to
 * setup_timer() in tun_flow_init().
 */
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *h, *n;

		hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;
			count++;
			this_timer = e->updated + delay;
			/* time_* macros handle jiffies wraparound */
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}
 299
/* Record that the flow with @rxhash was last seen on @tfile's queue.
 * Updates an existing entry locklessly under RCU; creation takes
 * tun->lock, re-checks for a race, honours the MAX_TAP_FLOWS bound,
 * and kicks the aging timer if it is not already pending.
 */
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* We may get a very small possibility of OOO during switching, not
	 * worth to optimize.*/
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
	} else {
		spin_lock_bh(&tun->lock);
		/* re-check under the lock: another CPU may have created it */
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
 340
/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow comes. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here. If we could not get rxhash, then we would
 * hope the rxq no. may help here.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = tun->numqueues;

	txq = skb_get_rxhash(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e)
			txq = e->queue_index;
		else
			/* use multiply and shift instead of expensive divide
			 * to scale the 32-bit hash into [0, numqueues) */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
 375
/* Return true if the caller may NOT use this device: an owner and/or
 * group restriction is configured, the caller matches neither, and the
 * caller lacks CAP_NET_ADMIN in the device's user namespace.
 */
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}
 385
/* Tell the stack how many tx/rx queues are currently attached. */
static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}
 391
/* Park a queue on the device's disabled list; tfile->detached marks the
 * file as disabled and remembers which device it belongs to. */
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}
 398
/* Take a queue off the disabled list, clearing its detached marker.
 * Returns the device the queue was parked on. */
static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
 408
/* Detach @tfile from its device.  Caller must hold RTNL.
 * An active queue is removed by moving the last tfiles[] slot into its
 * place so the array stays dense.  @clean means the file itself is
 * going away: drop the socket reference and, when this was the last
 * queue of a non-persistent device, unregister the netdevice.  With
 * !clean the queue is merely parked on the disabled list.
 */
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;
	struct net_device *dev;

	tun = rtnl_dereference(tfile->tun);

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);
		dev = tun->dev;

		/* fill the hole with the last queue */
		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			rcu_assign_pointer(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		/* wait for in-flight RCU readers before purging */
		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & TUN_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}

		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
				 &tfile->socket.flags));
		sk_release_kernel(&tfile->sk);
	}
}
 458
/* Locked wrapper: __tun_detach() requires RTNL. */
static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}
 465
/* Detach every queue (active and disabled) from the device; called from
 * ndo_uninit.  First unpublish all tun pointers and wake sleepers, then
 * wait for RCU readers, then purge queues and drop socket references.
 * Caller must hold RTNL.
 */
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		wake_up_all(&tfile->wq.wait);
		rcu_assign_pointer(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		wake_up_all(&tfile->wq.wait);
		rcu_assign_pointer(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		/* Drop read queue */
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		sock_put(&tfile->sk);
	}
	/* _safe: tun_enable_queue() unlinks entries as we go */
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	/* persistent devices took a module reference; release it */
	if (tun->flags & TUN_PERSIST)
		module_put(THIS_MODULE);
}
 502
 503static int tun_attach(struct tun_struct *tun, struct file *file)
 504{
 505        struct tun_file *tfile = file->private_data;
 506        int err;
 507
 508        err = security_tun_dev_attach(tfile->socket.sk, tun->security);
 509        if (err < 0)
 510                goto out;
 511
 512        err = -EINVAL;
 513        if (rtnl_dereference(tfile->tun) && !tfile->detached)
 514                goto out;
 515
 516        err = -EBUSY;
 517        if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
 518                goto out;
 519
 520        err = -E2BIG;
 521        if (!tfile->detached &&
 522            tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 523                goto out;
 524
 525        err = 0;
 526
 527        /* Re-attach the filter to presist device */
 528        if (tun->filter_attached == true) {
 529                err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 530                if (!err)
 531                        goto out;
 532        }
 533        tfile->queue_index = tun->numqueues;
 534        rcu_assign_pointer(tfile->tun, tun);
 535        rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 536        tun->numqueues++;
 537
 538        if (tfile->detached)
 539                tun_enable_queue(tfile);
 540        else
 541                sock_hold(&tfile->sk);
 542
 543        tun_set_real_num_queues(tun);
 544
 545        /* device is allowed to go away first, so no need to hold extra
 546         * refcnt.
 547         */
 548
 549out:
 550        return err;
 551}
 552
/* Return the device attached to @tfile (or NULL), holding a reference
 * on its netdevice.  Pair with tun_put().
 */
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);	/* pin dev before leaving RCU section */
	rcu_read_unlock();

	return tun;
}
 565
/* Convenience wrapper around __tun_get() for a struct file. */
static struct tun_struct *tun_get(struct file *file)
{
	return __tun_get(file->private_data);
}
 570
/* Release the netdevice reference taken by __tun_get()/tun_get(). */
static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
 575
 576/* TAP filtering */
 577static void addr_hash_set(u32 *mask, const u8 *addr)
 578{
 579        int n = ether_crc(ETH_ALEN, addr) >> 26;
 580        mask[n >> 5] |= (1 << (n & 31));
 581}
 582
 583static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 584{
 585        int n = ether_crc(ETH_ALEN, addr) >> 26;
 586        return mask[n >> 5] & (1 << (n & 31));
 587}
 588
/* Rebuild the TAP filter from a user-space struct tun_filter followed
 * by uf.count MAC addresses.  The first FLT_EXACT_COUNT addresses form
 * the exact-match table; remaining multicast addresses go into the hash
 * mask; any remaining unicast address leaves the filter disabled.
 * Returns the number of exact filters installed, or a negative errno.
 */
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = kmalloc(alen, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* addresses follow the tun_filter header in the user buffer */
	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
		err = -EFAULT;
		goto done;
	}

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto done;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

done:
	kfree(addr);
	return err;
}
 653
/* Match a frame's destination MAC against the TAP filter: first the
 * exact-match table, then (multicast only) the hash mask.
 * Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because the mac header is not yet
	 * set up correctly at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}
 673
 674/*
 675 * Checks whether the packet is accepted or not.
 676 * Returns: 0 - drop, !=0 - accept
 677 */
 678static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 679{
 680        if (!filter->count)
 681                return 1;
 682
 683        return run_filter(filter, skb);
 684}
 685
 686/* Network device part of the driver */
 687
 688static const struct ethtool_ops tun_ethtool_ops;
 689
/* Net device detach from fd: ndo_uninit — drop every attached queue
 * before the netdevice goes away. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}
 695
/* Net device open: allow transmission on all queues. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}
 702
/* Net device close: stop transmission on all queues. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
 709
/* Net device start xmit: queue the skb on the receive queue of the
 * tun_file socket serving skb->queue_mapping, then wake its reader.
 * The packet is dropped (never requeued) when the queue is gone, a
 * filter rejects it, or the per-queue backlog is full; always returns
 * NETDEV_TX_OK.
 */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	/* Per-socket BPF filter, if one is attached */
	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	/* Limit the number of packets queued by dividing txq length with the
	 * number of queues.
	 */
	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
			  >= dev->tx_queue_len / tun->numqueues)
		goto drop;

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time. */
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;
	skb_orphan(skb);

	nf_reset(skb);

	/* Enqueue packet */
	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
				   POLLRDNORM | POLLRDBAND);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
 772
/* ndo_set_rx_mode: intentionally empty. */
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}
 781
 782#define MIN_MTU 68
 783#define MAX_MTU 65535
 784
 785static int
 786tun_net_change_mtu(struct net_device *dev, int new_mtu)
 787{
 788        if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
 789                return -EINVAL;
 790        dev->mtu = new_mtu;
 791        return 0;
 792}
 793
 794static netdev_features_t tun_net_fix_features(struct net_device *dev,
 795        netdev_features_t features)
 796{
 797        struct tun_struct *tun = netdev_priv(dev);
 798
 799        return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 800}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * Since both of those are synchronous operations, we are guaranteed
	 * never to have pending data when we poll for it
	 * so there's nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
	return;
}
#endif
/* ndo callbacks for TUN (point-to-point, layer-3, no MAC) devices. */
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};
 830
/* ndo callbacks for TAP (Ethernet) devices; adds MAC-address and
 * rx-mode handling on top of the TUN set. */
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};
 846
/* Initialize the flow table buckets and start the aging timer.
 * Always returns 0. */
static int tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));

	return 0;
}
 861
/* Stop the aging timer (waiting for a running callback) and drop every
 * flow entry. */
static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
 867
/* Initialize net device according to its mode (TUN vs TAP). */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;

	case TUN_TAP_DEV:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		/* start with a random MAC; userspace may override it */
		eth_hw_addr_random(dev);

		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;
	}
}
 901
 902/* Character device part */
 903
/* poll() handler for the tun character device.  Reports readability
 * when packets are queued and writability from socket buffer space;
 * returns POLLERR if the queue is unattached or the underlying netdev
 * has been unregistered.
 */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	struct sock *sk;
	unsigned int mask = 0;

	if (!tun)
		return POLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, &tfile->wq.wait, wait);

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Re-test writability after setting SOCK_ASYNC_NOSPACE so that a
	 * wakeup racing between the two checks is not lost
	 * (tun_sock_write_space clears the bit and wakes us).
	 */
	if (sock_writeable(sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     sock_writeable(sk)))
		mask |= POLLOUT | POLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = POLLERR;

	tun_put(tun);
	return mask;
}
 935
/* Allocate an skb for a packet arriving from userspace.
 * prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers) — the
 * remainder is left for page frags.  Honors sndbuf accounting and
 * blocks unless noblock is set.  Returns ERR_PTR() on failure.
 */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	/* Pre-account the paged (non-linear) part the caller will fill. */
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
 962
 963/* set skb frags from iovec, this can move to core network code for reuse */
 964static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
 965                                  int offset, size_t count)
 966{
 967        int len = iov_length(from, count) - offset;
 968        int copy = skb_headlen(skb);
 969        int size, offset1 = 0;
 970        int i = 0;
 971
 972        /* Skip over from offset */
 973        while (count && (offset >= from->iov_len)) {
 974                offset -= from->iov_len;
 975                ++from;
 976                --count;
 977        }
 978
 979        /* copy up to skb headlen */
 980        while (count && (copy > 0)) {
 981                size = min_t(unsigned int, copy, from->iov_len - offset);
 982                if (copy_from_user(skb->data + offset1, from->iov_base + offset,
 983                                   size))
 984                        return -EFAULT;
 985                if (copy > size) {
 986                        ++from;
 987                        --count;
 988                        offset = 0;
 989                } else
 990                        offset += size;
 991                copy -= size;
 992                offset1 += size;
 993        }
 994
 995        if (len == offset1)
 996                return 0;
 997
 998        while (count--) {
 999                struct page *page[MAX_SKB_FRAGS];
1000                int num_pages;
1001                unsigned long base;
1002                unsigned long truesize;
1003
1004                len = from->iov_len - offset;
1005                if (!len) {
1006                        offset = 0;
1007                        ++from;
1008                        continue;
1009                }
1010                base = (unsigned long)from->iov_base + offset;
1011                size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
1012                if (i + size > MAX_SKB_FRAGS)
1013                        return -EMSGSIZE;
1014                num_pages = get_user_pages_fast(base, size, 0, &page[i]);
1015                if (num_pages != size) {
1016                        for (i = 0; i < num_pages; i++)
1017                                put_page(page[i]);
1018                        return -EFAULT;
1019                }
1020                truesize = size * PAGE_SIZE;
1021                skb->data_len += len;
1022                skb->len += len;
1023                skb->truesize += truesize;
1024                atomic_add(truesize, &skb->sk->sk_wmem_alloc);
1025                while (len) {
1026                        int off = base & ~PAGE_MASK;
1027                        int size = min_t(int, len, PAGE_SIZE - off);
1028                        __skb_fill_page_desc(skb, i, page[i], off, size);
1029                        skb_shinfo(skb)->nr_frags++;
1030                        /* increase sk_wmem_alloc */
1031                        base += size;
1032                        len -= size;
1033                        i++;
1034                }
1035                offset = 0;
1036                ++from;
1037        }
1038        return 0;
1039}
1040
/* Get packet from user space buffer.  Parses the optional packet info
 * (struct tun_pi) and virtio-net header, builds an skb — zero-copy via
 * user-page mapping when msg_control carries a ubuf_info — applies
 * checksum/GSO metadata, and injects the packet into the stack with
 * netif_rx_ni().  Returns total_len on success or a negative errno.
 */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, const struct iovec *iv,
			    size_t total_len, size_t count, int noblock)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = total_len, align = NET_SKB_PAD;
	struct virtio_net_hdr gso = { 0 };
	int offset = 0;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash;

	if (!(tun->flags & TUN_NO_PI)) {
		/* len is size_t: a too-short buffer wraps on subtraction
		 * and is caught by the > total_len comparison. */
		if ((len -= sizeof(pi)) > total_len)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
			return -EFAULT;
		offset += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		if ((len -= tun->vnet_hdr_sz) > total_len)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
			return -EFAULT;

		/* Ensure hdr_len covers the 16-bit checksum field we are
		 * asked to complete. */
		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;

		if (gso.hdr_len > len)
			return -EINVAL;
		offset += tun->vnet_hdr_sz;
	}

	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
			return -EINVAL;
	}

	/* msg_control carries a ubuf_info from an in-kernel sender:
	 * map the payload pages instead of copying them. */
	if (msg_control)
		zerocopy = true;

	if (zerocopy) {
		/* Userspace may produce vectors with count greater than
		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
		 * to let the rest of data to be fit in the frags.
		 */
		if (count > MAX_SKB_FRAGS) {
			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
			if (copylen < offset)
				copylen = 0;
			else
				copylen -= offset;
		} else
				copylen = 0;
		/* There are 256 bytes to be copied in skb, so there is enough
		 * room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		if (copylen < gso.hdr_len)
			copylen = gso.hdr_len;
		if (!copylen)
			copylen = GOODCOPY_LEN;
	} else
		copylen = len;

	skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
	if (IS_ERR(skb)) {
		/* -EAGAIN is the normal non-blocking "no buffer space"
		 * case and is not counted as a drop. */
		if (PTR_ERR(skb) != -EAGAIN)
			tun->dev->stats.rx_dropped++;
		return PTR_ERR(skb);
	}

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
	else
		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);

	if (err) {
		tun->dev->stats.rx_dropped++;
		kfree_skb(skb);
		return -EFAULT;
	}

	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, gso.csum_start,
					  gso.csum_offset)) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		if (tun->flags & TUN_NO_PI) {
			/* No packet info header: infer the protocol from
			 * the IP version nibble of the first byte. */
			switch (skb->data[0] & 0xf0) {
			case 0x40:
				pi.proto = htons(ETH_P_IP);
				break;
			case 0x60:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				tun->dev->stats.rx_dropped++;
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		default:
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = gso.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	}

	skb_reset_network_header(skb);
	/* Compute the hash before netif_rx_ni(): the skb may already be
	 * consumed and freed afterwards. */
	rxhash = skb_get_rxhash(skb);
	netif_rx_ni(skb);

	tun->dev->stats.rx_packets++;
	tun->dev->stats.rx_bytes += len;

	/* Record the flow -> queue mapping (used for queue selection;
	 * see .ndo_select_queue = tun_select_queue). */
	tun_flow_update(tun, rxhash, tfile);
	return total_len;
}
1217
1218static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
1219                              unsigned long count, loff_t pos)
1220{
1221        struct file *file = iocb->ki_filp;
1222        struct tun_struct *tun = tun_get(file);
1223        struct tun_file *tfile = file->private_data;
1224        ssize_t result;
1225
1226        if (!tun)
1227                return -EBADFD;
1228
1229        tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
1230
1231        result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
1232                              count, file->f_flags & O_NONBLOCK);
1233
1234        tun_put(tun);
1235        return result;
1236}
1237
/* Put packet to the user space buffer.  Writes the optional tun_pi and
 * virtio-net headers first, then copies up to len bytes of packet data.
 * Returns headers + skb->len, which may exceed the bytes actually
 * copied — tun_recvmsg() compares this against the buffer length to
 * detect truncation (MSG_TRUNC).
 */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    const struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be striped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
			return -EFAULT;
		total += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 }; /* no info leak */
		if ((len -= tun->vnet_hdr_sz) < 0)
			return -EINVAL;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			gso.hdr_len = skb_headlen(skb);
			gso.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else {
				/* GSO skb with a type we cannot express in
				 * the virtio header: log and drop. */
				pr_err("unexpected GSO type: "
				       "0x%x, gso_size %d, hdr_len %d\n",
				       sinfo->gso_type, gso.gso_size,
				       gso.hdr_len);
				print_hex_dump(KERN_ERR, "tun: ",
					       DUMP_PREFIX_NONE,
					       16, 1, skb->head,
					       min((int)gso.hdr_len, 64), true);
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			gso.csum_start = skb_checksum_start_offset(skb);
			gso.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
					       sizeof(gso))))
			return -EFAULT;
		total += tun->vnet_hdr_sz;
	}

	len = min_t(int, skb->len, len);

	/* NOTE(review): the copy's return value is ignored, so a fault
	 * while copying the payload is silently dropped — confirm this
	 * is intentional. */
	skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
	/* Account the full frame length, not just the copied bytes, so
	 * the caller can detect truncation. */
	total += skb->len;

	tun->dev->stats.tx_packets++;
	tun->dev->stats.tx_bytes += len;

	return total;
}
1319
/* Read one packet for userspace: dequeue an skb from the per-queue
 * socket receive queue, copy it out via tun_put_user() and free it.
 * Sleeps on tfile->wq.wait unless noblock is set; returns bytes
 * produced, -EAGAIN, -ERESTARTSYS or -EIO (device unregistered).
 */
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct kiocb *iocb, const struct iovec *iv,
			   ssize_t len, int noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t ret = 0;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (unlikely(!noblock))
		add_wait_queue(&tfile->wq.wait, &wait);
	while (len) {
		/* NOTE(review): plain assignment rather than
		 * set_current_state() omits the memory barrier — confirm
		 * this cannot lose a wakeup racing with the queue check. */
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			if (tun->dev->reg_state != NETREG_REGISTERED) {
				ret = -EIO;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}

		ret = tun_put_user(tun, tfile, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	if (unlikely(!noblock))
		remove_wait_queue(&tfile->wq.wait, &wait);

	return ret;
}
1366
1367static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1368                            unsigned long count, loff_t pos)
1369{
1370        struct file *file = iocb->ki_filp;
1371        struct tun_file *tfile = file->private_data;
1372        struct tun_struct *tun = __tun_get(tfile);
1373        ssize_t len, ret;
1374
1375        if (!tun)
1376                return -EBADFD;
1377        len = iov_length(iv, count);
1378        if (len < 0) {
1379                ret = -EINVAL;
1380                goto out;
1381        }
1382
1383        ret = tun_do_read(tun, tfile, iocb, iv, len,
1384                          file->f_flags & O_NONBLOCK);
1385        ret = min_t(ssize_t, ret, len);
1386out:
1387        tun_put(tun);
1388        return ret;
1389}
1390
/* netdev destructor (installed by tun_setup): runs when the last
 * reference to the device is dropped.  All queues must already be
 * detached, hence the BUG_ON on a non-empty disabled list.
 */
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	free_netdev(dev);
}
1400
/* Device setup callback (used by alloc_netdev_mqs() and rtnl):
 * initialize ownership to "unrestricted" and wire up ethtool ops and
 * the destructor.
 */
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
}
1411
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	/* Creating tun/tap devices via netlink is not supported. */
	return -EINVAL;
}
1419
/* Registering these ops makes "ip link del" work on tun/tap devices;
 * tun_validate() rejects creation through netlink.
 */
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};
1426
/* sk_write_space callback: wake pollers and deliver SIGIO when the
 * socket send buffer becomes writeable again.
 */
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	/* Only wake if someone actually observed the NOSPACE condition
	 * (tun_chr_poll sets this bit before waiting). */
	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
1446
1447static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1448                       struct msghdr *m, size_t total_len)
1449{
1450        int ret;
1451        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1452        struct tun_struct *tun = __tun_get(tfile);
1453
1454        if (!tun)
1455                return -EBADFD;
1456        ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1457                           m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1458        tun_put(tun);
1459        return ret;
1460}
1461
1462
1463static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1464                       struct msghdr *m, size_t total_len,
1465                       int flags)
1466{
1467        struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1468        struct tun_struct *tun = __tun_get(tfile);
1469        int ret;
1470
1471        if (!tun)
1472                return -EBADFD;
1473
1474        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
1475                return -EINVAL;
1476        ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1477                          flags & MSG_DONTWAIT);
1478        if (ret > total_len) {
1479                m->msg_flags |= MSG_TRUNC;
1480                ret = flags & MSG_TRUNC ? ret : total_len;
1481        }
1482        tun_put(tun);
1483        return ret;
1484}
1485
/* proto_ops release: drop the sock reference held through the socket. */
static int tun_release(struct socket *sock)
{
	if (sock->sk)
		sock_put(sock->sk);
	return 0;
}
1492
/* Ops structure to mimic raw sockets with tun.
 * NOTE(review): presumably consumed by in-kernel users (e.g. vhost-net)
 * through a tun socket accessor — confirm against the rest of the file.
 */
static const struct proto_ops tun_socket_ops = {
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
	.release = tun_release,
};
1499
/* Minimal struct proto backing the socket embedded in each tun_file. */
static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};
1505
1506static int tun_flags(struct tun_struct *tun)
1507{
1508        int flags = 0;
1509
1510        if (tun->flags & TUN_TUN_DEV)
1511                flags |= IFF_TUN;
1512        else
1513                flags |= IFF_TAP;
1514
1515        if (tun->flags & TUN_NO_PI)
1516                flags |= IFF_NO_PI;
1517
1518        /* This flag has no real effect.  We track the value for backwards
1519         * compatibility.
1520         */
1521        if (tun->flags & TUN_ONE_QUEUE)
1522                flags |= IFF_ONE_QUEUE;
1523
1524        if (tun->flags & TUN_VNET_HDR)
1525                flags |= IFF_VNET_HDR;
1526
1527        if (tun->flags & TUN_TAP_MQ)
1528                flags |= IFF_MULTI_QUEUE;
1529
1530        return flags;
1531}
1532
/* sysfs "tun_flags" attribute: IFF_* flags of the device, in hex. */
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}
1539
1540static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1541                              char *buf)
1542{
1543        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1544        return uid_valid(tun->owner)?
1545                sprintf(buf, "%u\n",
1546                        from_kuid_munged(current_user_ns(), tun->owner)):
1547                sprintf(buf, "-1\n");
1548}
1549
1550static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1551                              char *buf)
1552{
1553        struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1554        return gid_valid(tun->group) ?
1555                sprintf(buf, "%u\n",
1556                        from_kgid_munged(current_user_ns(), tun->group)):
1557                sprintf(buf, "-1\n");
1558}
1559
1560static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1561static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1562static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1563
1564static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1565{
1566        struct tun_struct *tun;
1567        struct tun_file *tfile = file->private_data;
1568        struct net_device *dev;
1569        int err;
1570
1571        if (tfile->detached)
1572                return -EINVAL;
1573
1574        dev = __dev_get_by_name(net, ifr->ifr_name);
1575        if (dev) {
1576                if (ifr->ifr_flags & IFF_TUN_EXCL)
1577                        return -EBUSY;
1578                if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1579                        tun = netdev_priv(dev);
1580                else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1581                        tun = netdev_priv(dev);
1582                else
1583                        return -EINVAL;
1584
1585                if (tun_not_capable(tun))
1586                        return -EPERM;
1587                err = security_tun_dev_open(tun->security);
1588                if (err < 0)
1589                        return err;
1590
1591                err = tun_attach(tun, file);
1592                if (err < 0)
1593                        return err;
1594
1595                if (tun->flags & TUN_TAP_MQ &&
1596                    (tun->numqueues + tun->numdisabled > 1))
1597                        return err;
1598        }
1599        else {
1600                char *name;
1601                unsigned long flags = 0;
1602                int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1603                             MAX_TAP_QUEUES : 1;
1604
1605                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1606                        return -EPERM;
1607                err = security_tun_dev_create();
1608                if (err < 0)
1609                        return err;
1610
1611                /* Set dev type */
1612                if (ifr->ifr_flags & IFF_TUN) {
1613                        /* TUN device */
1614                        flags |= TUN_TUN_DEV;
1615                        name = "tun%d";
1616                } else if (ifr->ifr_flags & IFF_TAP) {
1617                        /* TAP device */
1618                        flags |= TUN_TAP_DEV;
1619                        name = "tap%d";
1620                } else
1621                        return -EINVAL;
1622
1623                if (*ifr->ifr_name)
1624                        name = ifr->ifr_name;
1625
1626                dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1627                                       tun_setup, queues, queues);
1628
1629                if (!dev)
1630                        return -ENOMEM;
1631
1632                dev_net_set(dev, net);
1633                dev->rtnl_link_ops = &tun_link_ops;
1634
1635                tun = netdev_priv(dev);
1636                tun->dev = dev;
1637                tun->flags = flags;
1638                tun->txflt.count = 0;
1639                tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1640
1641                tun->filter_attached = false;
1642                tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1643
1644                spin_lock_init(&tun->lock);
1645
1646                err = security_tun_dev_alloc_security(&tun->security);
1647                if (err < 0)
1648                        goto err_free_dev;
1649
1650                tun_net_init(dev);
1651
1652                err = tun_flow_init(tun);
1653                if (err < 0)
1654                        goto err_free_dev;
1655
1656                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1657                        TUN_USER_FEATURES;
1658                dev->features = dev->hw_features;
1659
1660                INIT_LIST_HEAD(&tun->disabled);
1661                err = tun_attach(tun, file);
1662                if (err < 0)
1663                        goto err_free_dev;
1664
1665                err = register_netdevice(tun->dev);
1666                if (err < 0)
1667                        goto err_free_dev;
1668
1669                if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1670                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1671                    device_create_file(&tun->dev->dev, &dev_attr_group))
1672                        pr_err("Failed to create tun sysfs files\n");
1673        }
1674
1675        netif_carrier_on(tun->dev);
1676
1677        tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1678
1679        if (ifr->ifr_flags & IFF_NO_PI)
1680                tun->flags |= TUN_NO_PI;
1681        else
1682                tun->flags &= ~TUN_NO_PI;
1683
1684        /* This flag has no real effect.  We track the value for backwards
1685         * compatibility.
1686         */
1687        if (ifr->ifr_flags & IFF_ONE_QUEUE)
1688                tun->flags |= TUN_ONE_QUEUE;
1689        else
1690                tun->flags &= ~TUN_ONE_QUEUE;
1691
1692        if (ifr->ifr_flags & IFF_VNET_HDR)
1693                tun->flags |= TUN_VNET_HDR;
1694        else
1695                tun->flags &= ~TUN_VNET_HDR;
1696
1697        if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1698                tun->flags |= TUN_TAP_MQ;
1699        else
1700                tun->flags &= ~TUN_TAP_MQ;
1701
1702        /* Make sure persistent devices do not get stuck in
1703         * xoff state.
1704         */
1705        if (netif_running(tun->dev))
1706                netif_tx_wake_all_queues(tun->dev);
1707
1708        strcpy(ifr->ifr_name, tun->dev->name);
1709        return 0;
1710
1711 err_free_dev:
1712        free_netdev(dev);
1713        return err;
1714}
1715
/* TUNGETIFF helper: report the device name and current IFF_* flags. */
static void tun_get_iff(struct net *net, struct tun_struct *tun,
		       struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);

}
1726
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required.  Translates TUN_F_* offload requests (TUNSETOFFLOAD)
 * into NETIF_F_* feature bits; any bit we do not recognise (or a
 * TSO/UFO request without TUN_F_CSUM) yields -EINVAL.
 */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		/* Checksum offload is a prerequisite: TSO/UFO bits are
		 * only honoured when TUN_F_CSUM is also set. */
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		if (arg & TUN_F_UFO) {
			features |= NETIF_F_UFO;
			arg &= ~TUN_F_UFO;
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	netdev_update_features(tun->dev);

	return 0;
}
1765
1766static void tun_detach_filter(struct tun_struct *tun, int n)
1767{
1768        int i;
1769        struct tun_file *tfile;
1770
1771        for (i = 0; i < n; i++) {
1772                tfile = rtnl_dereference(tun->tfiles[i]);
1773                sk_detach_filter(tfile->socket.sk);
1774        }
1775
1776        tun->filter_attached = false;
1777}
1778
1779static int tun_attach_filter(struct tun_struct *tun)
1780{
1781        int i, ret = 0;
1782        struct tun_file *tfile;
1783
1784        for (i = 0; i < tun->numqueues; i++) {
1785                tfile = rtnl_dereference(tun->tfiles[i]);
1786                ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1787                if (ret) {
1788                        tun_detach_filter(tun, i);
1789                        return ret;
1790                }
1791        }
1792
1793        tun->filter_attached = true;
1794        return ret;
1795}
1796
1797static void tun_set_sndbuf(struct tun_struct *tun)
1798{
1799        struct tun_file *tfile;
1800        int i;
1801
1802        for (i = 0; i < tun->numqueues; i++) {
1803                tfile = rtnl_dereference(tun->tfiles[i]);
1804                tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1805        }
1806}
1807
1808static int tun_set_queue(struct file *file, struct ifreq *ifr)
1809{
1810        struct tun_file *tfile = file->private_data;
1811        struct tun_struct *tun;
1812        int ret = 0;
1813
1814        rtnl_lock();
1815
1816        if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1817                tun = tfile->detached;
1818                if (!tun) {
1819                        ret = -EINVAL;
1820                        goto unlock;
1821                }
1822                ret = security_tun_dev_attach_queue(tun->security);
1823                if (ret < 0)
1824                        goto unlock;
1825                ret = tun_attach(tun, file);
1826        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1827                tun = rtnl_dereference(tfile->tun);
1828                if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
1829                        ret = -EINVAL;
1830                else
1831                        __tun_detach(tfile, false);
1832        } else
1833                ret = -EINVAL;
1834
1835unlock:
1836        rtnl_unlock();
1837        return ret;
1838}
1839
/*
 * Common ioctl handler shared by the native and compat entry points.
 *
 * @ifreq_len distinguishes the two callers: sizeof(struct ifreq) for
 * native, sizeof(struct compat_ifreq) for 32-bit tasks, so the ifreq
 * copies below never access beyond the caller's (smaller) structure.
 *
 * Returns 0 on success or a negative errno.
 */
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	void __user* argp = (void __user*)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	int ret;

	/* Only commands that actually carry an ifreq (TUNSETIFF, TUNSETQUEUE
	 * and the socket ioctls, _IOC_TYPE 0x89) copy it from user space;
	 * all others get a zeroed one so later code never reads garbage.
	 */
	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF. */
		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
				IFF_VNET_HDR | IFF_MULTI_QUEUE,
				(unsigned int __user*)argp);
	} else if (cmd == TUNSETQUEUE)
		/* Queue attach/detach takes rtnl itself. */
		return tun_set_queue(file, &ifr);

	ret = 0;
	rtnl_lock();

	tun = __tun_get(tfile);
	if (cmd == TUNSETIFF && !tun) {
		/* Not attached yet: create or look up the device. */
		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(tfile->net, file, &ifr);

		if (ret)
			goto unlock;

		/* Hand the (possibly kernel-chosen) name/flags back. */
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}

	/* Every remaining command requires an attached device. */
	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		/* Report current name and flags back to user space. */
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module being unprobed.
		 */
		if (arg && !(tun->flags & TUN_PERSIST)) {
			tun->flags |= TUN_PERSIST;
			__module_get(THIS_MODULE);
		}
		if (!arg && (tun->flags & TUN_PERSIST)) {
			tun->flags &= ~TUN_PERSIST;
			module_put(THIS_MODULE);
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		/* arg is a TUN_F_* bitmask; see set_offload(). */
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		/* Read back from this queue's own socket. */
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}

		/* Store on the device and apply to all queues. */
		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		/* Must cover at least the basic virtio_net_hdr. */
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = -EFAULT;
		/* The program is copied into tun->fprog before attaching so
		 * the same filter can be applied across the queues.
		 */
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	default:
		ret = -EINVAL;
		break;
	}

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
2064
2065static long tun_chr_ioctl(struct file *file,
2066                          unsigned int cmd, unsigned long arg)
2067{
2068        return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
2069}
2070
#ifdef CONFIG_COMPAT
/* 32-bit ioctl entry point.  Pointer-carrying commands are translated
 * with compat_ptr(); everything else is truncated to a compat_ulong_t.
 */
static long tun_chr_compat_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	if (cmd == TUNSETIFF || cmd == TUNGETIFF || cmd == TUNSETTXFILTER ||
	    cmd == TUNGETSNDBUF || cmd == TUNSETSNDBUF ||
	    cmd == SIOCGIFHWADDR || cmd == SIOCSIFHWADDR)
		arg = (unsigned long)compat_ptr(arg);
	else
		arg = (compat_ulong_t)arg;

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
2099
2100static int tun_chr_fasync(int fd, struct file *file, int on)
2101{
2102        struct tun_file *tfile = file->private_data;
2103        int ret;
2104
2105        if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2106                goto out;
2107
2108        if (on) {
2109                ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2110                if (ret)
2111                        goto out;
2112                tfile->flags |= TUN_FASYNC;
2113        } else
2114                tfile->flags &= ~TUN_FASYNC;
2115        ret = 0;
2116out:
2117        return ret;
2118}
2119
/*
 * Open of the tun character device: allocate the per-fd tun_file, which
 * embeds a socket so the fd can later be driven via sock_sendmsg /
 * sock_recvmsg (see tun_get_socket()).  The file is not attached to any
 * network device yet; that happens at TUNSETIFF / TUNSETQUEUE.
 */
static int tun_chr_open(struct inode *inode, struct file * file)
{
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto);
	if (!tfile)
		return -ENOMEM;
	/* Not attached to a device yet. */
	rcu_assign_pointer(tfile->tun, NULL);
	/* Pin the opener's netns; released in tun_chr_close(). */
	tfile->net = get_net(current->nsproxy->net_ns);
	tfile->flags = 0;

	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
	init_waitqueue_head(&tfile->wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);
	/* sk_alloc() above used init_net; move the sock to the opener's ns. */
	sk_change_net(&tfile->sk, tfile->net);

	tfile->sk.sk_write_space = tun_sock_write_space;
	/* Effectively unlimited until TUNSETSNDBUF says otherwise. */
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
	INIT_LIST_HEAD(&tfile->next);

	return 0;
}
2152
/*
 * Release of the tun fd: detach the queue from its device and drop the
 * netns reference taken in tun_chr_open().
 */
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	/* NOTE(review): net is read BEFORE tun_detach() — the detach with
	 * clean==true appears to drop the last reference and may free tfile,
	 * so this ordering looks deliberate; confirm before reordering.
	 */
	struct net *net = tfile->net;

	tun_detach(tfile, true);
	put_net(net);

	return 0;
}
2163
/* File operations for the tun character device.  read/write are routed
 * through the aio handlers via do_sync_read/do_sync_write; a separate
 * compat ioctl entry exists because compat_ifreq is smaller than ifreq.
 */
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read  = do_sync_read,
	.aio_read  = tun_chr_aio_read,
	.write = do_sync_write,
	.aio_write = tun_chr_aio_write,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync
};
2180
/* Misc character device; .nodename makes udev create it as /dev/net/tun. */
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
2187
2188/* ethtool interface */
2189
2190static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2191{
2192        cmd->supported          = 0;
2193        cmd->advertising        = 0;
2194        ethtool_cmd_speed_set(cmd, SPEED_10);
2195        cmd->duplex             = DUPLEX_FULL;
2196        cmd->port               = PORT_TP;
2197        cmd->phy_address        = 0;
2198        cmd->transceiver        = XCVR_INTERNAL;
2199        cmd->autoneg            = AUTONEG_DISABLE;
2200        cmd->maxtxpkt           = 0;
2201        cmd->maxrxpkt           = 0;
2202        return 0;
2203}
2204
2205static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2206{
2207        struct tun_struct *tun = netdev_priv(dev);
2208
2209        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2210        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2211
2212        switch (tun->flags & TUN_TYPE_MASK) {
2213        case TUN_TUN_DEV:
2214                strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2215                break;
2216        case TUN_TAP_DEV:
2217                strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2218                break;
2219        }
2220}
2221
/* ethtool get_msglevel: expose tun->debug when built with TUN_DEBUG. */
static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	/* NOTE(review): negative errno squeezed into a u32 return; callers
	 * see a large value rather than an error.  Kept as-is for
	 * compatibility. */
	return -EOPNOTSUPP;
#endif
}
2231
/* ethtool set_msglevel: store into tun->debug when built with TUN_DEBUG;
 * otherwise silently ignore the request.
 */
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}
2239
/* Minimal ethtool support: static settings, drvinfo, msglevel and the
 * generic link-state helper.
 */
static const struct ethtool_ops tun_ethtool_ops = {
	.get_settings	= tun_get_settings,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
};
2247
2248
2249static int __init tun_init(void)
2250{
2251        int ret = 0;
2252
2253        pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2254        pr_info("%s\n", DRV_COPYRIGHT);
2255
2256        ret = rtnl_link_register(&tun_link_ops);
2257        if (ret) {
2258                pr_err("Can't register link_ops\n");
2259                goto err_linkops;
2260        }
2261
2262        ret = misc_register(&tun_miscdev);
2263        if (ret) {
2264                pr_err("Can't register misc device %d\n", TUN_MINOR);
2265                goto err_misc;
2266        }
2267        return  0;
2268err_misc:
2269        rtnl_link_unregister(&tun_link_ops);
2270err_linkops:
2271        return ret;
2272}
2273
/* Module exit: tear down in the reverse order of tun_init() — chardev
 * first so no new opens arrive, then the rtnl link ops.
 */
static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}
2279
2280/* Get an underlying socket object from tun file.  Returns error unless file is
2281 * attached to a device.  The returned object works like a packet socket, it
2282 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
2283 * holding a reference to the file for as long as the socket is in use. */
2284struct socket *tun_get_socket(struct file *file)
2285{
2286        struct tun_file *tfile;
2287        if (file->f_op != &tun_fops)
2288                return ERR_PTR(-EINVAL);
2289        tfile = file->private_data;
2290        if (!tfile)
2291                return ERR_PTR(-EBADFD);
2292        return &tfile->socket;
2293}
2294EXPORT_SYMBOL_GPL(tun_get_socket);
2295
/* Module boilerplate: entry/exit points, metadata, and aliases so the
 * module auto-loads on open of /dev/net/tun or by its misc minor.
 */
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
2303
/* lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995. */