linux/net/core/dev.c
   1/*
   2 *      NET3    Protocol independent device support routines.
   3 *
   4 *              This program is free software; you can redistribute it and/or
   5 *              modify it under the terms of the GNU General Public License
   6 *              as published by the Free Software Foundation; either version
   7 *              2 of the License, or (at your option) any later version.
   8 *
   9 *      Derived from the non IP parts of dev.c 1.0.19
  10 *              Authors:        Ross Biro
  11 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
  13 *
  14 *      Additional Authors:
  15 *              Florian la Roche <rzsfl@rz.uni-sb.de>
  16 *              Alan Cox <gw4pts@gw4pts.ampr.org>
  17 *              David Hinds <dahinds@users.sourceforge.net>
  18 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  19 *              Adam Sulmicki <adam@cfar.umd.edu>
  20 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
  21 *
  22 *      Changes:
  23 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
  24 *                                      to 2 if register_netdev gets called
  25 *                                      before net_dev_init & also removed a
  26 *                                      few lines of code in the process.
  27 *              Alan Cox        :       device private ioctl copies fields back.
  28 *              Alan Cox        :       Transmit queue code does relevant
  29 *                                      stunts to keep the queue safe.
  30 *              Alan Cox        :       Fixed double lock.
  31 *              Alan Cox        :       Fixed promisc NULL pointer trap
  32 *              ????????        :       Support the full private ioctl range
  33 *              Alan Cox        :       Moved ioctl permission check into
  34 *                                      drivers
  35 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
  36 *              Alan Cox        :       100 backlog just doesn't cut it when
  37 *                                      you start doing multicast video 8)
  38 *              Alan Cox        :       Rewrote net_bh and list manager.
  39 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
  40 *              Alan Cox        :       Took out transmit every packet pass
  41 *                                      Saved a few bytes in the ioctl handler
  42 *              Alan Cox        :       Network driver sets packet type before
  43 *                                      calling netif_rx. Saves a function
  44 *                                      call a packet.
  45 *              Alan Cox        :       Hashed net_bh()
  46 *              Richard Kooijman:       Timestamp fixes.
  47 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
  48 *              Alan Cox        :       Device lock protection.
  49 *              Alan Cox        :       Fixed nasty side effect of device close
  50 *                                      changes.
  51 *              Rudi Cilibrasi  :       Pass the right thing to
  52 *                                      set_mac_address()
  53 *              Dave Miller     :       32bit quantity for the device lock to
  54 *                                      make it work out on a Sparc.
  55 *              Bjorn Ekwall    :       Added KERNELD hack.
  56 *              Alan Cox        :       Cleaned up the backlog initialise.
  57 *              Craig Metz      :       SIOCGIFCONF fix if space for under
  58 *                                      1 device.
  59 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
  60 *                                      is no device open function.
  61 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
  62 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
  63 *              Cyrus Durgin    :       Cleaned for KMOD
  64 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
  65 *                                      A network device unload needs to purge
  66 *                                      the backlog queue.
  67 *      Paul Rusty Russell      :       SIOCSIFNAME
  68 *              Pekka Riikonen  :       Netdev boot-time settings code
  69 *              Andrew Morton   :       Make unregister_netdevice wait
  70 *                                      indefinitely on dev->refcnt
  71 *              J Hadi Salim    :       - Backlog queue sampling
  72 *                                      - netif_rx() feedback
  73 */
  74
  75#include <asm/uaccess.h>
  76#include <linux/bitops.h>
  77#include <linux/capability.h>
  78#include <linux/cpu.h>
  79#include <linux/types.h>
  80#include <linux/kernel.h>
  81#include <linux/hash.h>
  82#include <linux/slab.h>
  83#include <linux/sched.h>
  84#include <linux/mutex.h>
  85#include <linux/string.h>
  86#include <linux/mm.h>
  87#include <linux/socket.h>
  88#include <linux/sockios.h>
  89#include <linux/errno.h>
  90#include <linux/interrupt.h>
  91#include <linux/if_ether.h>
  92#include <linux/netdevice.h>
  93#include <linux/etherdevice.h>
  94#include <linux/ethtool.h>
  95#include <linux/notifier.h>
  96#include <linux/skbuff.h>
  97#include <net/net_namespace.h>
  98#include <net/sock.h>
  99#include <linux/rtnetlink.h>
 100#include <linux/proc_fs.h>
 101#include <linux/seq_file.h>
 102#include <linux/stat.h>
 103#include <net/dst.h>
 104#include <net/pkt_sched.h>
 105#include <net/checksum.h>
 106#include <net/xfrm.h>
 107#include <linux/highmem.h>
 108#include <linux/init.h>
 109#include <linux/kmod.h>
 110#include <linux/module.h>
 111#include <linux/netpoll.h>
 112#include <linux/rcupdate.h>
 113#include <linux/delay.h>
 114#include <net/wext.h>
 115#include <net/iw_handler.h>
 116#include <asm/current.h>
 117#include <linux/audit.h>
 118#include <linux/dmaengine.h>
 119#include <linux/err.h>
 120#include <linux/ctype.h>
 121#include <linux/if_arp.h>
 122#include <linux/if_vlan.h>
 123#include <linux/ip.h>
 124#include <net/ip.h>
 125#include <linux/ipv6.h>
 126#include <linux/in.h>
 127#include <linux/jhash.h>
 128#include <linux/random.h>
 129#include <trace/events/napi.h>
 130#include <trace/events/net.h>
 131#include <trace/events/skb.h>
 132#include <linux/pci.h>
 133#include <linux/inetdevice.h>
 134#include <linux/cpu_rmap.h>
 135#include <linux/net_tstamp.h>
 136#include <linux/static_key.h>
 137#include <net/flow_keys.h>
 138
 139#include "net-sysfs.h"
 140
 141/* Instead of increasing this, you should create a hash table. */
 142#define MAX_GRO_SKBS 8
 143
 144/* This should be increased if a protocol with a bigger head is added. */
 145#define GRO_MAX_HEAD (MAX_HEADER + 128)
 146
 147/*
 148 *      The list of packet types we will receive (as opposed to discard)
 149 *      and the routines to invoke.
 150 *
 151 *      Why 16. Because with 16 the only overlap we get on a hash of the
 152 *      low nibble of the protocol value is RARP/SNAP/X.25.
 153 *
 154 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 155 *             sure which should go first, but I bet it won't make much
 156 *             difference if we are running VLANs.  The good news is that
 157 *             this protocol won't be in the list unless compiled in, so
 158 *             the average user (w/out VLANs) will not be adversely affected.
 159 *             --BLG
 160 *
 161 *              0800    IP
 162 *              8100    802.1Q VLAN
 163 *              0001    802.3
 164 *              0002    AX.25
 165 *              0004    802.2
 166 *              8035    RARP
 167 *              0005    SNAP
 168 *              0805    X.25
 169 *              0806    ARP
 170 *              8137    IPX
 171 *              0009    Localtalk
 172 *              86DD    IPv6
 173 */
 174
 175#define PTYPE_HASH_SIZE (16)
 176#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
 177
 178static DEFINE_SPINLOCK(ptype_lock);
 179static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 180static struct list_head ptype_all __read_mostly;        /* Taps */
 181
 182/*
 183 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 184 * semaphore.
 185 *
 186 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 187 *
 188 * Writers must hold the rtnl semaphore while they loop through the
 189 * dev_base_head list, and hold dev_base_lock for writing when they do the
 190 * actual updates.  This allows pure readers to access the list even
 191 * while a writer is preparing to update it.
 192 *
 193 * To put it another way, dev_base_lock is held for writing only to
 194 * protect against pure readers; the rtnl semaphore provides the
 195 * protection against other writers.
 196 *
 197 * See, for example usages, register_netdevice() and
 198 * unregister_netdevice(), which must be called with the rtnl
 199 * semaphore held.
 200 */
 201DEFINE_RWLOCK(dev_base_lock);
 202EXPORT_SYMBOL(dev_base_lock);
 203
 204static inline void dev_base_seq_inc(struct net *net)
 205{
 206        while (++net->dev_base_seq == 0);
 207}
 208
 209static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 210{
 211        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 212
 213        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
 214}
 215
 216static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 217{
 218        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 219}
 220
 221static inline void rps_lock(struct softnet_data *sd)
 222{
 223#ifdef CONFIG_RPS
 224        spin_lock(&sd->input_pkt_queue.lock);
 225#endif
 226}
 227
 228static inline void rps_unlock(struct softnet_data *sd)
 229{
 230#ifdef CONFIG_RPS
 231        spin_unlock(&sd->input_pkt_queue.lock);
 232#endif
 233}
 234
 235/* Device list insertion */
 236static int list_netdevice(struct net_device *dev)
 237{
 238        struct net *net = dev_net(dev);
 239
 240        ASSERT_RTNL();
 241
 242        write_lock_bh(&dev_base_lock);
 243        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
 244        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 245        hlist_add_head_rcu(&dev->index_hlist,
 246                           dev_index_hash(net, dev->ifindex));
 247        write_unlock_bh(&dev_base_lock);
 248
 249        dev_base_seq_inc(net);
 250
 251        return 0;
 252}
 253
 254/* Device list removal
 255 * caller must respect a RCU grace period before freeing/reusing dev
 256 */
 257static void unlist_netdevice(struct net_device *dev)
 258{
 259        ASSERT_RTNL();
 260
 261        /* Unlink dev from the device chain */
 262        write_lock_bh(&dev_base_lock);
 263        list_del_rcu(&dev->dev_list);
 264        hlist_del_rcu(&dev->name_hlist);
 265        hlist_del_rcu(&dev->index_hlist);
 266        write_unlock_bh(&dev_base_lock);
 267
 268        dev_base_seq_inc(dev_net(dev));
 269}
 270
 271/*
 272 *      Our notifier list
 273 */
 274
 275static RAW_NOTIFIER_HEAD(netdev_chain);
 276
 277/*
 278 *      Device drivers call our routines to queue packets here. We empty the
 279 *      queue in the local softnet handler.
 280 */
 281
 282DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 283EXPORT_PER_CPU_SYMBOL(softnet_data);
 284
 285#ifdef CONFIG_LOCKDEP
 286/*
 287 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 288 * according to dev->type
 289 */
 290static const unsigned short netdev_lock_type[] =
 291        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
 292         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
 293         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
 294         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
 295         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
 296         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
 297         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
 298         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
 299         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
 300         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
 301         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
 302         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
 303         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
 304         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
 305         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
 306
 307static const char *const netdev_lock_name[] =
 308        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
 309         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
 310         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
 311         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
 312         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
 313         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
 314         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
 315         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
 316         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
 317         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
 318         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
 319         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
 320         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
 321         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
 322         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
 323
 324static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 325static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 326
 327static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 328{
 329        int i;
 330
 331        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
 332                if (netdev_lock_type[i] == dev_type)
 333                        return i;
 334        /* the last key is used by default */
 335        return ARRAY_SIZE(netdev_lock_type) - 1;
 336}
 337
 338static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 339                                                 unsigned short dev_type)
 340{
 341        int i;
 342
 343        i = netdev_lock_pos(dev_type);
 344        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 345                                   netdev_lock_name[i]);
 346}
 347
 348static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 349{
 350        int i;
 351
 352        i = netdev_lock_pos(dev->type);
 353        lockdep_set_class_and_name(&dev->addr_list_lock,
 354                                   &netdev_addr_lock_key[i],
 355                                   netdev_lock_name[i]);
 356}
 357#else
 358static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 359                                                 unsigned short dev_type)
 360{
 361}
 362static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 363{
 364}
 365#endif
 366
 367/*******************************************************************************
 368
 369                Protocol management and registration routines
 370
 371*******************************************************************************/
 372
 373/*
 374 *      Add a protocol ID to the list. Now that the input handler is
 375 *      smarter we can dispense with all the messy stuff that used to be
 376 *      here.
 377 *
 378 *      BEWARE!!! Protocol handlers, mangling input packets,
 379 *      MUST BE last in hash buckets and checking protocol handlers
 380 *      MUST start from promiscuous ptype_all chain in net_bh.
 381 *      It is true now, do not change it.
 382 *      Explanation follows: if protocol handler, mangling packet, will
 383 *      be the first on list, it is not able to sense, that packet
 384 *      is cloned and should be copied-on-write, so that it will
 385 *      change it and subsequent readers will get broken packet.
 386 *                                                      --ANK (980803)
 387 */
 388
 389static inline struct list_head *ptype_head(const struct packet_type *pt)
 390{
 391        if (pt->type == htons(ETH_P_ALL))
 392                return &ptype_all;
 393        else
 394                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 395}
 396
 397/**
 398 *      dev_add_pack - add packet handler
 399 *      @pt: packet type declaration
 400 *
 401 *      Add a protocol handler to the networking stack. The passed &packet_type
 402 *      is linked into kernel lists and may not be freed until it has been
 403 *      removed from the kernel lists.
 404 *
  405 *      This call does not sleep, therefore it cannot guarantee that
  406 *      all CPUs in the middle of receiving packets will see the new
  407 *      packet type (until the next packet is received).
 408 */
 409
 410void dev_add_pack(struct packet_type *pt)
 411{
 412        struct list_head *head = ptype_head(pt);
 413
 414        spin_lock(&ptype_lock);
 415        list_add_rcu(&pt->list, head);
 416        spin_unlock(&ptype_lock);
 417}
 418EXPORT_SYMBOL(dev_add_pack);
 419
 420/**
 421 *      __dev_remove_pack        - remove packet handler
 422 *      @pt: packet type declaration
 423 *
 424 *      Remove a protocol handler that was previously added to the kernel
 425 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 426 *      from the kernel lists and can be freed or reused once this function
 427 *      returns.
 428 *
 429 *      The packet type might still be in use by receivers
  430 *      and must not be freed until after all the CPUs have gone
 431 *      through a quiescent state.
 432 */
 433void __dev_remove_pack(struct packet_type *pt)
 434{
 435        struct list_head *head = ptype_head(pt);
 436        struct packet_type *pt1;
 437
 438        spin_lock(&ptype_lock);
 439
 440        list_for_each_entry(pt1, head, list) {
 441                if (pt == pt1) {
 442                        list_del_rcu(&pt->list);
 443                        goto out;
 444                }
 445        }
 446
 447        pr_warn("dev_remove_pack: %p not found\n", pt);
 448out:
 449        spin_unlock(&ptype_lock);
 450}
 451EXPORT_SYMBOL(__dev_remove_pack);
 452
 453/**
 454 *      dev_remove_pack  - remove packet handler
 455 *      @pt: packet type declaration
 456 *
 457 *      Remove a protocol handler that was previously added to the kernel
 458 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 459 *      from the kernel lists and can be freed or reused once this function
 460 *      returns.
 461 *
 462 *      This call sleeps to guarantee that no CPU is looking at the packet
 463 *      type after return.
 464 */
 465void dev_remove_pack(struct packet_type *pt)
 466{
 467        __dev_remove_pack(pt);
 468
 469        synchronize_net();
 470}
 471EXPORT_SYMBOL(dev_remove_pack);
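
/*
 * Illustrative sketch (not part of this file): a module could register a
 * handler for all IPv4 frames with dev_add_pack() and tear it down again
 * with dev_remove_pack().  The names "my_rcv" and "my_ipv4_ptype" are
 * hypothetical and used only for illustration.
 *
 *      static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *                        struct packet_type *pt, struct net_device *orig_dev)
 *      {
 *              kfree_skb(skb);         (handlers must release the skb they get)
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type my_ipv4_ptype __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_IP),
 *              .func = my_rcv,
 *      };
 *
 *      dev_add_pack(&my_ipv4_ptype);           (e.g. in module init)
 *      ...
 *      dev_remove_pack(&my_ipv4_ptype);        (e.g. in module exit)
 */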
 472
 473/******************************************************************************
 474
 475                      Device Boot-time Settings Routines
 476
 477*******************************************************************************/
 478
 479/* Boot time configuration table */
 480static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
 481
 482/**
 483 *      netdev_boot_setup_add   - add new setup entry
 484 *      @name: name of the device
 485 *      @map: configured settings for the device
 486 *
  487 *      Adds a new setup entry to the dev_boot_setup list.  The function
  488 *      returns 0 on error and 1 on success.  This is a generic routine
  489 *      for all netdevices.
 490 */
 491static int netdev_boot_setup_add(char *name, struct ifmap *map)
 492{
 493        struct netdev_boot_setup *s;
 494        int i;
 495
 496        s = dev_boot_setup;
 497        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 498                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
 499                        memset(s[i].name, 0, sizeof(s[i].name));
 500                        strlcpy(s[i].name, name, IFNAMSIZ);
 501                        memcpy(&s[i].map, map, sizeof(s[i].map));
 502                        break;
 503                }
 504        }
 505
 506        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
 507}
 508
 509/**
 510 *      netdev_boot_setup_check - check boot time settings
 511 *      @dev: the netdevice
 512 *
 513 *      Check boot time settings for the device.
  514 *      Any settings found are applied to the device for use later
  515 *      during device probing.
  516 *      Returns 0 if no settings were found, 1 if they were.
 517 */
 518int netdev_boot_setup_check(struct net_device *dev)
 519{
 520        struct netdev_boot_setup *s = dev_boot_setup;
 521        int i;
 522
 523        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 524                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
 525                    !strcmp(dev->name, s[i].name)) {
 526                        dev->irq        = s[i].map.irq;
 527                        dev->base_addr  = s[i].map.base_addr;
 528                        dev->mem_start  = s[i].map.mem_start;
 529                        dev->mem_end    = s[i].map.mem_end;
 530                        return 1;
 531                }
 532        }
 533        return 0;
 534}
 535EXPORT_SYMBOL(netdev_boot_setup_check);
 536
 537
 538/**
 539 *      netdev_boot_base        - get address from boot time settings
 540 *      @prefix: prefix for network device
 541 *      @unit: id for network device
 542 *
  543 *      Check boot time settings for the base address of the device.
  544 *      Returns 1 if the device is already registered (so it should not
  545 *      be probed again), the configured base address if one was found,
  546 *      or 0 if no settings were found.
 547 */
 548unsigned long netdev_boot_base(const char *prefix, int unit)
 549{
 550        const struct netdev_boot_setup *s = dev_boot_setup;
 551        char name[IFNAMSIZ];
 552        int i;
 553
 554        sprintf(name, "%s%d", prefix, unit);
 555
 556        /*
 557         * If device already registered then return base of 1
 558         * to indicate not to probe for this interface
 559         */
 560        if (__dev_get_by_name(&init_net, name))
 561                return 1;
 562
 563        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
 564                if (!strcmp(name, s[i].name))
 565                        return s[i].map.base_addr;
 566        return 0;
 567}
 568
 569/*
 570 * Saves at boot time configured settings for any netdevice.
 571 */
 572int __init netdev_boot_setup(char *str)
 573{
 574        int ints[5];
 575        struct ifmap map;
 576
 577        str = get_options(str, ARRAY_SIZE(ints), ints);
 578        if (!str || !*str)
 579                return 0;
 580
 581        /* Save settings */
 582        memset(&map, 0, sizeof(map));
 583        if (ints[0] > 0)
 584                map.irq = ints[1];
 585        if (ints[0] > 1)
 586                map.base_addr = ints[2];
 587        if (ints[0] > 2)
 588                map.mem_start = ints[3];
 589        if (ints[0] > 3)
 590                map.mem_end = ints[4];
 591
 592        /* Add new entry to the list */
 593        return netdev_boot_setup_add(str, &map);
 594}
 595
 596__setup("netdev=", netdev_boot_setup);
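
/*
 * Illustrative sketch (not part of this file): the parameter parsed above
 * takes up to four integers (irq, base I/O address, memory start, memory
 * end) followed by the interface name, so a kernel command line could
 * contain, for example:
 *
 *      netdev=5,0x340,0,0,eth0
 *
 * which records irq 5 and base address 0x340 for "eth0" in dev_boot_setup,
 * to be picked up later by netdev_boot_setup_check().
 */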
 597
 598/*******************************************************************************
 599
 600                            Device Interface Subroutines
 601
 602*******************************************************************************/
 603
 604/**
 605 *      __dev_get_by_name       - find a device by its name
 606 *      @net: the applicable net namespace
 607 *      @name: name to find
 608 *
 609 *      Find an interface by name. Must be called under RTNL semaphore
 610 *      or @dev_base_lock. If the name is found a pointer to the device
 611 *      is returned. If the name is not found then %NULL is returned. The
 612 *      reference counters are not incremented so the caller must be
 613 *      careful with locks.
 614 */
 615
 616struct net_device *__dev_get_by_name(struct net *net, const char *name)
 617{
 618        struct hlist_node *p;
 619        struct net_device *dev;
 620        struct hlist_head *head = dev_name_hash(net, name);
 621
 622        hlist_for_each_entry(dev, p, head, name_hlist)
 623                if (!strncmp(dev->name, name, IFNAMSIZ))
 624                        return dev;
 625
 626        return NULL;
 627}
 628EXPORT_SYMBOL(__dev_get_by_name);
 629
 630/**
 631 *      dev_get_by_name_rcu     - find a device by its name
 632 *      @net: the applicable net namespace
 633 *      @name: name to find
 634 *
 635 *      Find an interface by name.
 636 *      If the name is found a pointer to the device is returned.
 637 *      If the name is not found then %NULL is returned.
 638 *      The reference counters are not incremented so the caller must be
 639 *      careful with locks. The caller must hold RCU lock.
 640 */
 641
 642struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
 643{
 644        struct hlist_node *p;
 645        struct net_device *dev;
 646        struct hlist_head *head = dev_name_hash(net, name);
 647
 648        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
 649                if (!strncmp(dev->name, name, IFNAMSIZ))
 650                        return dev;
 651
 652        return NULL;
 653}
 654EXPORT_SYMBOL(dev_get_by_name_rcu);
 655
 656/**
 657 *      dev_get_by_name         - find a device by its name
 658 *      @net: the applicable net namespace
 659 *      @name: name to find
 660 *
 661 *      Find an interface by name. This can be called from any
 662 *      context and does its own locking. The returned handle has
 663 *      the usage count incremented and the caller must use dev_put() to
 664 *      release it when it is no longer needed. %NULL is returned if no
 665 *      matching device is found.
 666 */
 667
 668struct net_device *dev_get_by_name(struct net *net, const char *name)
 669{
 670        struct net_device *dev;
 671
 672        rcu_read_lock();
 673        dev = dev_get_by_name_rcu(net, name);
 674        if (dev)
 675                dev_hold(dev);
 676        rcu_read_unlock();
 677        return dev;
 678}
 679EXPORT_SYMBOL(dev_get_by_name);
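
/*
 * Illustrative sketch (not part of this file): dev_get_by_name() takes a
 * reference on the device it returns, so callers must balance it with
 * dev_put().  The name "eth0" is only an example.
 *
 *      struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *      if (dev) {
 *              ... use dev ...
 *              dev_put(dev);
 *      }
 */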
 680
 681/**
 682 *      __dev_get_by_index - find a device by its ifindex
 683 *      @net: the applicable net namespace
 684 *      @ifindex: index of device
 685 *
 686 *      Search for an interface by index. Returns %NULL if the device
 687 *      is not found or a pointer to the device. The device has not
 688 *      had its reference counter increased so the caller must be careful
 689 *      about locking. The caller must hold either the RTNL semaphore
 690 *      or @dev_base_lock.
 691 */
 692
 693struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 694{
 695        struct hlist_node *p;
 696        struct net_device *dev;
 697        struct hlist_head *head = dev_index_hash(net, ifindex);
 698
 699        hlist_for_each_entry(dev, p, head, index_hlist)
 700                if (dev->ifindex == ifindex)
 701                        return dev;
 702
 703        return NULL;
 704}
 705EXPORT_SYMBOL(__dev_get_by_index);
 706
 707/**
 708 *      dev_get_by_index_rcu - find a device by its ifindex
 709 *      @net: the applicable net namespace
 710 *      @ifindex: index of device
 711 *
 712 *      Search for an interface by index. Returns %NULL if the device
 713 *      is not found or a pointer to the device. The device has not
 714 *      had its reference counter increased so the caller must be careful
 715 *      about locking. The caller must hold RCU lock.
 716 */
 717
 718struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
 719{
 720        struct hlist_node *p;
 721        struct net_device *dev;
 722        struct hlist_head *head = dev_index_hash(net, ifindex);
 723
 724        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
 725                if (dev->ifindex == ifindex)
 726                        return dev;
 727
 728        return NULL;
 729}
 730EXPORT_SYMBOL(dev_get_by_index_rcu);
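
/*
 * Illustrative sketch (not part of this file): the _rcu lookup does not
 * take a reference, so the returned pointer is only valid inside the RCU
 * read-side critical section unless dev_hold() is called on it:
 *
 *      rcu_read_lock();
 *      dev = dev_get_by_index_rcu(net, ifindex);
 *      if (dev)
 *              ... use dev, or dev_hold(dev) to keep it past the unlock ...
 *      rcu_read_unlock();
 */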
 731
 732
 733/**
 734 *      dev_get_by_index - find a device by its ifindex
 735 *      @net: the applicable net namespace
 736 *      @ifindex: index of device
 737 *
 738 *      Search for an interface by index. Returns NULL if the device
 739 *      is not found or a pointer to the device. The device returned has
 740 *      had a reference added and the pointer is safe until the user calls
 741 *      dev_put to indicate they have finished with it.
 742 */
 743
 744struct net_device *dev_get_by_index(struct net *net, int ifindex)
 745{
 746        struct net_device *dev;
 747
 748        rcu_read_lock();
 749        dev = dev_get_by_index_rcu(net, ifindex);
 750        if (dev)
 751                dev_hold(dev);
 752        rcu_read_unlock();
 753        return dev;
 754}
 755EXPORT_SYMBOL(dev_get_by_index);
 756
 757/**
 758 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 759 *      @net: the applicable net namespace
 760 *      @type: media type of device
 761 *      @ha: hardware address
 762 *
 763 *      Search for an interface by MAC address. Returns NULL if the device
 764 *      is not found or a pointer to the device.
 765 *      The caller must hold RCU or RTNL.
 766 *      The returned device has not had its ref count increased
 767 *      and the caller must therefore be careful about locking
 768 *
 769 */
 770
 771struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 772                                       const char *ha)
 773{
 774        struct net_device *dev;
 775
 776        for_each_netdev_rcu(net, dev)
 777                if (dev->type == type &&
 778                    !memcmp(dev->dev_addr, ha, dev->addr_len))
 779                        return dev;
 780
 781        return NULL;
 782}
 783EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
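
/*
 * Illustrative sketch (not part of this file): looking up an Ethernet
 * device by MAC address under the RCU read lock.  The address below is a
 * made-up example.
 *
 *      static const char mac[ETH_ALEN] =
 *              { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *      rcu_read_lock();
 *      dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *      if (dev)
 *              ... dev is valid only while the RCU read lock is held ...
 *      rcu_read_unlock();
 */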
 784
 785struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
 786{
 787        struct net_device *dev;
 788
 789        ASSERT_RTNL();
 790        for_each_netdev(net, dev)
 791                if (dev->type == type)
 792                        return dev;
 793
 794        return NULL;
 795}
 796EXPORT_SYMBOL(__dev_getfirstbyhwtype);
 797
 798struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
 799{
 800        struct net_device *dev, *ret = NULL;
 801
 802        rcu_read_lock();
 803        for_each_netdev_rcu(net, dev)
 804                if (dev->type == type) {
 805                        dev_hold(dev);
 806                        ret = dev;
 807                        break;
 808                }
 809        rcu_read_unlock();
 810        return ret;
 811}
 812EXPORT_SYMBOL(dev_getfirstbyhwtype);
 813
 814/**
 815 *      dev_get_by_flags_rcu - find any device with given flags
 816 *      @net: the applicable net namespace
 817 *      @if_flags: IFF_* values
 818 *      @mask: bitmask of bits in if_flags to check
 819 *
 820 *      Search for any interface with the given flags. Returns NULL if a device
 821 *      is not found or a pointer to the device. Must be called inside
 822 *      rcu_read_lock(), and result refcount is unchanged.
 823 */
 824
 825struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
 826                                    unsigned short mask)
 827{
 828        struct net_device *dev, *ret;
 829
 830        ret = NULL;
 831        for_each_netdev_rcu(net, dev) {
 832                if (((dev->flags ^ if_flags) & mask) == 0) {
 833                        ret = dev;
 834                        break;
 835                }
 836        }
 837        return ret;
 838}
 839EXPORT_SYMBOL(dev_get_by_flags_rcu);
 840
 841/**
 842 *      dev_valid_name - check if name is okay for network device
 843 *      @name: name string
 844 *
  845 *      Network device names need to be valid file names to
  846 *      allow sysfs to work.  We also disallow any kind of
  847 *      whitespace.
 848 */
 849bool dev_valid_name(const char *name)
 850{
 851        if (*name == '\0')
 852                return false;
 853        if (strlen(name) >= IFNAMSIZ)
 854                return false;
 855        if (!strcmp(name, ".") || !strcmp(name, ".."))
 856                return false;
 857
 858        while (*name) {
 859                if (*name == '/' || isspace(*name))
 860                        return false;
 861                name++;
 862        }
 863        return true;
 864}
 865EXPORT_SYMBOL(dev_valid_name);
 866
 867/**
 868 *      __dev_alloc_name - allocate a name for a device
 869 *      @net: network namespace to allocate the device name in
 870 *      @name: name format string
 871 *      @buf:  scratch buffer and result name string
 872 *
  873 *      Passed a format string - eg "lt%d" - it will try to find a suitable
  874 *      id. It scans the list of devices to build up a free map, then chooses
  875 *      the first empty slot. The caller must hold the dev_base or rtnl lock
  876 *      while allocating the name and adding the device in order to avoid
  877 *      duplicates.
  878 *      Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 879 *      Returns the number of the unit assigned or a negative errno code.
 880 */
 881
 882static int __dev_alloc_name(struct net *net, const char *name, char *buf)
 883{
 884        int i = 0;
 885        const char *p;
 886        const int max_netdevices = 8*PAGE_SIZE;
 887        unsigned long *inuse;
 888        struct net_device *d;
 889
 890        p = strnchr(name, IFNAMSIZ-1, '%');
 891        if (p) {
 892                /*
 893                 * Verify the string as this thing may have come from
 894                 * the user.  There must be either one "%d" and no other "%"
 895                 * characters.
 896                 */
 897                if (p[1] != 'd' || strchr(p + 2, '%'))
 898                        return -EINVAL;
 899
 900                /* Use one page as a bit array of possible slots */
 901                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
 902                if (!inuse)
 903                        return -ENOMEM;
 904
 905                for_each_netdev(net, d) {
 906                        if (!sscanf(d->name, name, &i))
 907                                continue;
 908                        if (i < 0 || i >= max_netdevices)
 909                                continue;
 910
 911                        /*  avoid cases where sscanf is not exact inverse of printf */
 912                        snprintf(buf, IFNAMSIZ, name, i);
 913                        if (!strncmp(buf, d->name, IFNAMSIZ))
 914                                set_bit(i, inuse);
 915                }
 916
 917                i = find_first_zero_bit(inuse, max_netdevices);
 918                free_page((unsigned long) inuse);
 919        }
 920
 921        if (buf != name)
 922                snprintf(buf, IFNAMSIZ, name, i);
 923        if (!__dev_get_by_name(net, buf))
 924                return i;
 925
  926        /* It is possible to run out of slots
 927         * when the name is long and there isn't enough space left
 928         * for the digits, or if all bits are used.
 929         */
 930        return -ENFILE;
 931}
 932
 933/**
 934 *      dev_alloc_name - allocate a name for a device
 935 *      @dev: device
 936 *      @name: name format string
 937 *
  938 *      Passed a format string - eg "lt%d" - it will try to find a suitable
  939 *      id. It scans the list of devices to build up a free map, then chooses
  940 *      the first empty slot. The caller must hold the dev_base or rtnl lock
  941 *      while allocating the name and adding the device in order to avoid
  942 *      duplicates.
  943 *      Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
 944 *      Returns the number of the unit assigned or a negative errno code.
 945 */
 946
 947int dev_alloc_name(struct net_device *dev, const char *name)
 948{
 949        char buf[IFNAMSIZ];
 950        struct net *net;
 951        int ret;
 952
 953        BUG_ON(!dev_net(dev));
 954        net = dev_net(dev);
 955        ret = __dev_alloc_name(net, name, buf);
 956        if (ret >= 0)
 957                strlcpy(dev->name, buf, IFNAMSIZ);
 958        return ret;
 959}
 960EXPORT_SYMBOL(dev_alloc_name);
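
/*
 * Illustrative sketch (not part of this file): a driver holding the rtnl
 * lock could pick the next free unit number for a freshly allocated
 * net_device before registering it.  "dummy%d" is only an example format.
 *
 *      err = dev_alloc_name(dev, "dummy%d");
 *      if (err < 0)
 *              goto fail;
 *
 * On success dev->name holds the chosen name, e.g. "dummy0", and the
 * return value is the unit number that was assigned.
 */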
 961
 962static int dev_get_valid_name(struct net_device *dev, const char *name)
 963{
 964        struct net *net;
 965
 966        BUG_ON(!dev_net(dev));
 967        net = dev_net(dev);
 968
 969        if (!dev_valid_name(name))
 970                return -EINVAL;
 971
 972        if (strchr(name, '%'))
 973                return dev_alloc_name(dev, name);
 974        else if (__dev_get_by_name(net, name))
 975                return -EEXIST;
 976        else if (dev->name != name)
 977                strlcpy(dev->name, name, IFNAMSIZ);
 978
 979        return 0;
 980}
 981
 982/**
 983 *      dev_change_name - change name of a device
 984 *      @dev: device
 985 *      @newname: name (or format string) must be at least IFNAMSIZ
 986 *
  987 *      Change the name of a device. A format string such as "eth%d"
  988 *      can be passed for wildcarding.
 989 */
 990int dev_change_name(struct net_device *dev, const char *newname)
 991{
 992        char oldname[IFNAMSIZ];
 993        int err = 0;
 994        int ret;
 995        struct net *net;
 996
 997        ASSERT_RTNL();
 998        BUG_ON(!dev_net(dev));
 999
1000        net = dev_net(dev);
1001        if (dev->flags & IFF_UP)
1002                return -EBUSY;
1003
1004        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1005                return 0;
1006
1007        memcpy(oldname, dev->name, IFNAMSIZ);
1008
1009        err = dev_get_valid_name(dev, newname);
1010        if (err < 0)
1011                return err;
1012
1013rollback:
1014        ret = device_rename(&dev->dev, dev->name);
1015        if (ret) {
1016                memcpy(dev->name, oldname, IFNAMSIZ);
1017                return ret;
1018        }
1019
1020        write_lock_bh(&dev_base_lock);
1021        hlist_del_rcu(&dev->name_hlist);
1022        write_unlock_bh(&dev_base_lock);
1023
1024        synchronize_rcu();
1025
1026        write_lock_bh(&dev_base_lock);
1027        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1028        write_unlock_bh(&dev_base_lock);
1029
1030        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1031        ret = notifier_to_errno(ret);
1032
1033        if (ret) {
1034                /* err >= 0 after dev_alloc_name() or stores the first errno */
1035                if (err >= 0) {
1036                        err = ret;
1037                        memcpy(dev->name, oldname, IFNAMSIZ);
1038                        goto rollback;
1039                } else {
1040                        pr_err("%s: name change rollback failed: %d\n",
1041                               dev->name, ret);
1042                }
1043        }
1044
1045        return err;
1046}
1047
1048/**
1049 *      dev_set_alias - change ifalias of a device
1050 *      @dev: device
1051 *      @alias: name up to IFALIASZ
 1052 *      @len: limit of bytes to copy from @alias
 1053 *
 1054 *      Set the ifalias for a device.
1055 */
1056int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1057{
1058        char *new_ifalias;
1059
1060        ASSERT_RTNL();
1061
1062        if (len >= IFALIASZ)
1063                return -EINVAL;
1064
1065        if (!len) {
1066                if (dev->ifalias) {
1067                        kfree(dev->ifalias);
1068                        dev->ifalias = NULL;
1069                }
1070                return 0;
1071        }
1072
1073        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1074        if (!new_ifalias)
1075                return -ENOMEM;
1076        dev->ifalias = new_ifalias;
1077
1078        strlcpy(dev->ifalias, alias, len+1);
1079        return len;
1080}
1081
1082
1083/**
1084 *      netdev_features_change - device changes features
1085 *      @dev: device to cause notification
1086 *
1087 *      Called to indicate a device has changed features.
1088 */
1089void netdev_features_change(struct net_device *dev)
1090{
1091        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1092}
1093EXPORT_SYMBOL(netdev_features_change);
1094
1095/**
1096 *      netdev_state_change - device changes state
1097 *      @dev: device to cause notification
1098 *
1099 *      Called to indicate a device has changed state. This function calls
1100 *      the notifier chains for netdev_chain and sends a NEWLINK message
1101 *      to the routing socket.
1102 */
1103void netdev_state_change(struct net_device *dev)
1104{
1105        if (dev->flags & IFF_UP) {
1106                call_netdevice_notifiers(NETDEV_CHANGE, dev);
1107                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1108        }
1109}
1110EXPORT_SYMBOL(netdev_state_change);
1111
1112int netdev_bonding_change(struct net_device *dev, unsigned long event)
1113{
1114        return call_netdevice_notifiers(event, dev);
1115}
1116EXPORT_SYMBOL(netdev_bonding_change);
1117
1118/**
1119 *      dev_load        - load a network module
1120 *      @net: the applicable net namespace
1121 *      @name: name of interface
1122 *
1123 *      If a network interface is not present and the process has suitable
1124 *      privileges this function loads the module. If module loading is not
1125 *      available in this kernel then it becomes a nop.
1126 */
1127
1128void dev_load(struct net *net, const char *name)
1129{
1130        struct net_device *dev;
1131        int no_module;
1132
1133        rcu_read_lock();
1134        dev = dev_get_by_name_rcu(net, name);
1135        rcu_read_unlock();
1136
1137        no_module = !dev;
1138        if (no_module && capable(CAP_NET_ADMIN))
1139                no_module = request_module("netdev-%s", name);
1140        if (no_module && capable(CAP_SYS_MODULE)) {
1141                if (!request_module("%s", name))
1142                        pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1143                                name);
1144        }
1145}
1146EXPORT_SYMBOL(dev_load);
1147
1148static int __dev_open(struct net_device *dev)
1149{
1150        const struct net_device_ops *ops = dev->netdev_ops;
1151        int ret;
1152
1153        ASSERT_RTNL();
1154
1155        if (!netif_device_present(dev))
1156                return -ENODEV;
1157
1158        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1159        ret = notifier_to_errno(ret);
1160        if (ret)
1161                return ret;
1162
1163        set_bit(__LINK_STATE_START, &dev->state);
1164
1165        if (ops->ndo_validate_addr)
1166                ret = ops->ndo_validate_addr(dev);
1167
1168        if (!ret && ops->ndo_open)
1169                ret = ops->ndo_open(dev);
1170
1171        if (ret)
1172                clear_bit(__LINK_STATE_START, &dev->state);
1173        else {
1174                dev->flags |= IFF_UP;
1175                net_dmaengine_get();
1176                dev_set_rx_mode(dev);
1177                dev_activate(dev);
1178                add_device_randomness(dev->dev_addr, dev->addr_len);
1179        }
1180
1181        return ret;
1182}
1183
1184/**
1185 *      dev_open        - prepare an interface for use.
1186 *      @dev:   device to open
1187 *
1188 *      Takes a device from down to up state. The device's private open
1189 *      function is invoked and then the multicast lists are loaded. Finally
1190 *      the device is moved into the up state and a %NETDEV_UP message is
1191 *      sent to the netdev notifier chain.
1192 *
1193 *      Calling this function on an active interface is a nop. On a failure
1194 *      a negative errno code is returned.
1195 */
1196int dev_open(struct net_device *dev)
1197{
1198        int ret;
1199
1200        if (dev->flags & IFF_UP)
1201                return 0;
1202
1203        ret = __dev_open(dev);
1204        if (ret < 0)
1205                return ret;
1206
1207        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1208        call_netdevice_notifiers(NETDEV_UP, dev);
1209
1210        return ret;
1211}
1212EXPORT_SYMBOL(dev_open);
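
/*
 * Illustrative sketch (not part of this file): dev_open() (like dev_close()
 * below) must be called with the RTNL semaphore held, so in-kernel callers
 * typically wrap it like this:
 *
 *      rtnl_lock();
 *      err = dev_open(dev);
 *      rtnl_unlock();
 */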
1213
1214static int __dev_close_many(struct list_head *head)
1215{
1216        struct net_device *dev;
1217
1218        ASSERT_RTNL();
1219        might_sleep();
1220
1221        list_for_each_entry(dev, head, unreg_list) {
1222                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1223
1224                clear_bit(__LINK_STATE_START, &dev->state);
1225
1226                /* Synchronize to scheduled poll. We cannot touch poll list, it
1227                 * can be even on different cpu. So just clear netif_running().
1228                 *
 1229                 * dev->stop() will invoke napi_disable() on all of its
1230                 * napi_struct instances on this device.
1231                 */
1232                smp_mb__after_clear_bit(); /* Commit netif_running(). */
1233        }
1234
1235        dev_deactivate_many(head);
1236
1237        list_for_each_entry(dev, head, unreg_list) {
1238                const struct net_device_ops *ops = dev->netdev_ops;
1239
1240                /*
 1241                 *      Call the device specific close. This cannot fail,
 1242                 *      and is only done if the device is UP.
1243                 *
1244                 *      We allow it to be called even after a DETACH hot-plug
1245                 *      event.
1246                 */
1247                if (ops->ndo_stop)
1248                        ops->ndo_stop(dev);
1249
1250                dev->flags &= ~IFF_UP;
1251                net_dmaengine_put();
1252        }
1253
1254        return 0;
1255}
1256
1257static int __dev_close(struct net_device *dev)
1258{
1259        int retval;
1260        LIST_HEAD(single);
1261
1262        list_add(&dev->unreg_list, &single);
1263        retval = __dev_close_many(&single);
1264        list_del(&single);
1265        return retval;
1266}
1267
1268static int dev_close_many(struct list_head *head)
1269{
1270        struct net_device *dev, *tmp;
1271        LIST_HEAD(tmp_list);
1272
1273        list_for_each_entry_safe(dev, tmp, head, unreg_list)
1274                if (!(dev->flags & IFF_UP))
1275                        list_move(&dev->unreg_list, &tmp_list);
1276
1277        __dev_close_many(head);
1278
1279        list_for_each_entry(dev, head, unreg_list) {
1280                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1281                call_netdevice_notifiers(NETDEV_DOWN, dev);
1282        }
1283
1284        /* rollback_registered_many needs the complete original list */
1285        list_splice(&tmp_list, head);
1286        return 0;
1287}
1288
1289/**
1290 *      dev_close - shutdown an interface.
1291 *      @dev: device to shutdown
1292 *
1293 *      This function moves an active device into down state. A
1294 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1295 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1296 *      chain.
1297 */
1298int dev_close(struct net_device *dev)
1299{
1300        if (dev->flags & IFF_UP) {
1301                LIST_HEAD(single);
1302
1303                list_add(&dev->unreg_list, &single);
1304                dev_close_many(&single);
1305                list_del(&single);
1306        }
1307        return 0;
1308}
1309EXPORT_SYMBOL(dev_close);
1310
1311
1312/**
1313 *      dev_disable_lro - disable Large Receive Offload on a device
1314 *      @dev: device
1315 *
1316 *      Disable Large Receive Offload (LRO) on a net device.  Must be
1317 *      called under RTNL.  This is needed if received packets may be
1318 *      forwarded to another interface.
1319 */
1320void dev_disable_lro(struct net_device *dev)
1321{
1322        /*
1323         * If we're trying to disable lro on a vlan device
1324         * use the underlying physical device instead
1325         */
1326        if (is_vlan_dev(dev))
1327                dev = vlan_dev_real_dev(dev);
1328
1329        dev->wanted_features &= ~NETIF_F_LRO;
1330        netdev_update_features(dev);
1331
1332        if (unlikely(dev->features & NETIF_F_LRO))
1333                netdev_WARN(dev, "failed to disable LRO!\n");
1334}
1335EXPORT_SYMBOL(dev_disable_lro);
1336
1337
1338static int dev_boot_phase = 1;
1339
1340/**
1341 *      register_netdevice_notifier - register a network notifier block
1342 *      @nb: notifier
1343 *
1344 *      Register a notifier to be called when network device events occur.
1345 *      The notifier passed is linked into the kernel structures and must
1346 *      not be reused until it has been unregistered. A negative errno code
1347 *      is returned on a failure.
1348 *
 1349 *      When registered, all registration and up events are replayed
 1350 *      to the new notifier to allow it to have a race free view of
 1351 *      the network device list.
1352 */
1353
1354int register_netdevice_notifier(struct notifier_block *nb)
1355{
1356        struct net_device *dev;
1357        struct net_device *last;
1358        struct net *net;
1359        int err;
1360
1361        rtnl_lock();
1362        err = raw_notifier_chain_register(&netdev_chain, nb);
1363        if (err)
1364                goto unlock;
1365        if (dev_boot_phase)
1366                goto unlock;
1367        for_each_net(net) {
1368                for_each_netdev(net, dev) {
1369                        err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1370                        err = notifier_to_errno(err);
1371                        if (err)
1372                                goto rollback;
1373
1374                        if (!(dev->flags & IFF_UP))
1375                                continue;
1376
1377                        nb->notifier_call(nb, NETDEV_UP, dev);
1378                }
1379        }
1380
1381unlock:
1382        rtnl_unlock();
1383        return err;
1384
1385rollback:
1386        last = dev;
1387        for_each_net(net) {
1388                for_each_netdev(net, dev) {
1389                        if (dev == last)
1390                                goto outroll;
1391
1392                        if (dev->flags & IFF_UP) {
1393                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1394                                nb->notifier_call(nb, NETDEV_DOWN, dev);
1395                        }
1396                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1397                        nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1398                }
1399        }
1400
1401outroll:
1402        raw_notifier_chain_unregister(&netdev_chain, nb);
1403        goto unlock;
1404}
1405EXPORT_SYMBOL(register_netdevice_notifier);
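
/*
 * Illustrative sketch (not part of this file): a subsystem interested in
 * device events supplies a notifier_block whose callback receives the
 * struct net_device as the void *ptr argument in this version of the API.
 * The names "my_netdev_event" and "my_nb" are hypothetical.
 *
 *      static int my_netdev_event(struct notifier_block *nb,
 *                                 unsigned long event, void *ptr)
 *      {
 *              struct net_device *dev = ptr;
 *
 *              switch (event) {
 *              case NETDEV_UP:
 *              case NETDEV_DOWN:
 *                      ... react to dev changing state ...
 *                      break;
 *              }
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_netdev_event,
 *      };
 *
 *      register_netdevice_notifier(&my_nb);
 *      ...
 *      unregister_netdevice_notifier(&my_nb);
 */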
1406
1407/**
1408 *      unregister_netdevice_notifier - unregister a network notifier block
1409 *      @nb: notifier
1410 *
1411 *      Unregister a notifier previously registered by
 1412 *      register_netdevice_notifier(). The notifier is unlinked from the
1413 *      kernel structures and may then be reused. A negative errno code
1414 *      is returned on a failure.
1415 *
 1416 *      After unregistering, unregister and down device events are
 1417 *      synthesized for all devices on the device list and sent to the
 1418 *      removed notifier, removing the need for special case cleanup code.
1419 */
1420
1421int unregister_netdevice_notifier(struct notifier_block *nb)
1422{
1423        struct net_device *dev;
1424        struct net *net;
1425        int err;
1426
1427        rtnl_lock();
1428        err = raw_notifier_chain_unregister(&netdev_chain, nb);
1429        if (err)
1430                goto unlock;
1431
1432        for_each_net(net) {
1433                for_each_netdev(net, dev) {
1434                        if (dev->flags & IFF_UP) {
1435                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1436                                nb->notifier_call(nb, NETDEV_DOWN, dev);
1437                        }
1438                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1439                        nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1440                }
1441        }
1442unlock:
1443        rtnl_unlock();
1444        return err;
1445}
1446EXPORT_SYMBOL(unregister_netdevice_notifier);
1447
1448/**
1449 *      call_netdevice_notifiers - call all network notifier blocks
1450 *      @val: value passed unmodified to notifier function
1451 *      @dev: net_device pointer passed unmodified to notifier function
1452 *
1453 *      Call all network notifier blocks.  Parameters and return value
1454 *      are as for raw_notifier_call_chain().
1455 */
1456
1457int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1458{
1459        ASSERT_RTNL();
1460        return raw_notifier_call_chain(&netdev_chain, val, dev);
1461}
1462EXPORT_SYMBOL(call_netdevice_notifiers);
1463
1464static struct static_key netstamp_needed __read_mostly;
1465#ifdef HAVE_JUMP_LABEL
 1466/* We are not allowed to call static_key_slow_dec() from irq context.
1467 * If net_disable_timestamp() is called from irq context, defer the
1468 * static_key_slow_dec() calls.
1469 */
1470static atomic_t netstamp_needed_deferred;
1471#endif
1472
1473void net_enable_timestamp(void)
1474{
1475#ifdef HAVE_JUMP_LABEL
1476        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1477
1478        if (deferred) {
1479                while (--deferred)
1480                        static_key_slow_dec(&netstamp_needed);
1481                return;
1482        }
1483#endif
1484        WARN_ON(in_interrupt());
1485        static_key_slow_inc(&netstamp_needed);
1486}
1487EXPORT_SYMBOL(net_enable_timestamp);
1488
1489void net_disable_timestamp(void)
1490{
1491#ifdef HAVE_JUMP_LABEL
1492        if (in_interrupt()) {
1493                atomic_inc(&netstamp_needed_deferred);
1494                return;
1495        }
1496#endif
1497        static_key_slow_dec(&netstamp_needed);
1498}
1499EXPORT_SYMBOL(net_disable_timestamp);
1500
1501static inline void net_timestamp_set(struct sk_buff *skb)
1502{
1503        skb->tstamp.tv64 = 0;
1504        if (static_key_false(&netstamp_needed))
1505                __net_timestamp(skb);
1506}
1507
1508#define net_timestamp_check(COND, SKB)                  \
1509        if (static_key_false(&netstamp_needed)) {               \
1510                if ((COND) && !(SKB)->tstamp.tv64)      \
1511                        __net_timestamp(SKB);           \
1512        }                                               \
1513
1514static int net_hwtstamp_validate(struct ifreq *ifr)
1515{
1516        struct hwtstamp_config cfg;
1517        enum hwtstamp_tx_types tx_type;
1518        enum hwtstamp_rx_filters rx_filter;
1519        int tx_type_valid = 0;
1520        int rx_filter_valid = 0;
1521
1522        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1523                return -EFAULT;
1524
1525        if (cfg.flags) /* reserved for future extensions */
1526                return -EINVAL;
1527
1528        tx_type = cfg.tx_type;
1529        rx_filter = cfg.rx_filter;
1530
1531        switch (tx_type) {
1532        case HWTSTAMP_TX_OFF:
1533        case HWTSTAMP_TX_ON:
1534        case HWTSTAMP_TX_ONESTEP_SYNC:
1535                tx_type_valid = 1;
1536                break;
1537        }
1538
1539        switch (rx_filter) {
1540        case HWTSTAMP_FILTER_NONE:
1541        case HWTSTAMP_FILTER_ALL:
1542        case HWTSTAMP_FILTER_SOME:
1543        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1544        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1545        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1546        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1547        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1548        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1549        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1550        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1551        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1552        case HWTSTAMP_FILTER_PTP_V2_EVENT:
1553        case HWTSTAMP_FILTER_PTP_V2_SYNC:
1554        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1555                rx_filter_valid = 1;
1556                break;
1557        }
1558
1559        if (!tx_type_valid || !rx_filter_valid)
1560                return -ERANGE;
1561
1562        return 0;
1563}
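
/*
 * Illustrative sketch, not part of this file: the structure validated above
 * is the one user space hands in via SIOCSHWTSTAMP.  A configuration that
 * passes net_hwtstamp_validate() and requests TX stamping plus stamping of
 * every received packet would be filled in like this:
 */
static void __maybe_unused example_fill_hwtstamp(struct hwtstamp_config *cfg)
{
        memset(cfg, 0, sizeof(*cfg));           /* flags must stay 0 (reserved) */
        cfg->tx_type = HWTSTAMP_TX_ON;
        cfg->rx_filter = HWTSTAMP_FILTER_ALL;
}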
1564
1565static inline bool is_skb_forwardable(struct net_device *dev,
1566                                      struct sk_buff *skb)
1567{
1568        unsigned int len;
1569
1570        if (!(dev->flags & IFF_UP))
1571                return false;
1572
1573        len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1574        if (skb->len <= len)
1575                return true;
1576
1577        /* if TSO is enabled, we don't care about the length as the packet
1578         * could be forwarded without being segmented beforehand
1579         */
1580        if (skb_is_gso(skb))
1581                return true;
1582
1583        return false;
1584}
1585
1586/**
1587 * dev_forward_skb - loopback an skb to another netif
1588 *
1589 * @dev: destination network device
1590 * @skb: buffer to forward
1591 *
1592 * return values:
1593 *      NET_RX_SUCCESS  (no congestion)
1594 *      NET_RX_DROP     (packet was dropped, but freed)
1595 *
1596 * dev_forward_skb can be used for injecting an skb from the
1597 * start_xmit function of one device into the receive queue
1598 * of another device.
1599 *
1600 * The receiving device may be in another namespace, so
1601 * we have to clear all information in the skb that could
1602 * impact namespace isolation.
1603 */
1604int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1605{
1606        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1607                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1608                        atomic_long_inc(&dev->rx_dropped);
1609                        kfree_skb(skb);
1610                        return NET_RX_DROP;
1611                }
1612        }
1613
1614        skb_orphan(skb);
1615        nf_reset(skb);
1616
1617        if (unlikely(!is_skb_forwardable(dev, skb))) {
1618                atomic_long_inc(&dev->rx_dropped);
1619                kfree_skb(skb);
1620                return NET_RX_DROP;
1621        }
1622        skb->skb_iif = 0;
1623        skb->dev = dev;
1624        skb_dst_drop(skb);
1625        skb->tstamp.tv64 = 0;
1626        skb->pkt_type = PACKET_HOST;
1627        skb->protocol = eth_type_trans(skb, dev);
1628        skb->mark = 0;
1629        secpath_reset(skb);
1630        nf_reset(skb);
1631        return netif_rx(skb);
1632}
1633EXPORT_SYMBOL_GPL(dev_forward_skb);
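
/*
 * Illustrative sketch, not part of this file: a veth-like driver would call
 * something like this from its ndo_start_xmit handler, with "peer" taken
 * from its private data.  Note the length is sampled before the call, since
 * dev_forward_skb() always consumes the skb:
 */
static netdev_tx_t __maybe_unused example_xmit_to_peer(struct sk_buff *skb,
                                                       struct net_device *dev,
                                                       struct net_device *peer)
{
        unsigned int len = skb->len;

        if (likely(dev_forward_skb(peer, skb) == NET_RX_SUCCESS)) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += len;
        } else {
                dev->stats.tx_dropped++;
        }
        return NETDEV_TX_OK;
}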
1634
1635static inline int deliver_skb(struct sk_buff *skb,
1636                              struct packet_type *pt_prev,
1637                              struct net_device *orig_dev)
1638{
1639        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1640                return -ENOMEM;
1641        atomic_inc(&skb->users);
1642        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1643}
1644
1645static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1646{
1647        if (!ptype->af_packet_priv || !skb->sk)
1648                return false;
1649
1650        if (ptype->id_match)
1651                return ptype->id_match(ptype, skb->sk);
1652        else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1653                return true;
1654
1655        return false;
1656}
1657
1658/*
1659 *      Support routine. Sends outgoing frames to any network
1660 *      taps currently in use.
1661 */
1662
1663static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1664{
1665        struct packet_type *ptype;
1666        struct sk_buff *skb2 = NULL;
1667        struct packet_type *pt_prev = NULL;
1668
1669        rcu_read_lock();
1670        list_for_each_entry_rcu(ptype, &ptype_all, list) {
1671                /* Never send packets back to the socket
1672                 * they originated from - MvS (miquels@drinkel.ow.org)
1673                 */
1674                if ((ptype->dev == dev || !ptype->dev) &&
1675                    (!skb_loop_sk(ptype, skb))) {
1676                        if (pt_prev) {
1677                                deliver_skb(skb2, pt_prev, skb->dev);
1678                                pt_prev = ptype;
1679                                continue;
1680                        }
1681
1682                        skb2 = skb_clone(skb, GFP_ATOMIC);
1683                        if (!skb2)
1684                                break;
1685
1686                        net_timestamp_set(skb2);
1687
1688                        /* The network header should already be correctly
1689                         * set by the sender, so the check below is just
1690                         * protection against buggy protocols.
1691                         */
1692                        skb_reset_mac_header(skb2);
1693
1694                        if (skb_network_header(skb2) < skb2->data ||
1695                            skb2->network_header > skb2->tail) {
1696                                net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1697                                                     ntohs(skb2->protocol),
1698                                                     dev->name);
1699                                skb_reset_network_header(skb2);
1700                        }
1701
1702                        skb2->transport_header = skb2->network_header;
1703                        skb2->pkt_type = PACKET_OUTGOING;
1704                        pt_prev = ptype;
1705                }
1706        }
1707        if (pt_prev)
1708                pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1709        rcu_read_unlock();
1710}
1711
1712/**
1713 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1714 * @dev: Network device
1715 * @txq: number of queues available
1716 *
1717 * If real_num_tx_queues is changed the tc mappings may no longer be
1718 * valid. To resolve this, verify that each tc mapping remains valid and,
1719 * if not, zero the mapping. With no priorities mapping to an
1720 * offset/count pair, that pair will no longer be used. In the worst
1721 * case, when TC0 becomes invalid, nothing can be done, so priority
1722 * mappings are disabled entirely. It is expected that drivers will fix
1723 * this mapping if they can before calling netif_set_real_num_tx_queues.
1724 */
1725static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1726{
1727        int i;
1728        struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1729
1730        /* If TC0 is invalidated disable TC mapping */
1731        if (tc->offset + tc->count > txq) {
1732                pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1733                dev->num_tc = 0;
1734                return;
1735        }
1736
1737        /* Invalidated prio to tc mappings set to TC0 */
1738        for (i = 1; i < TC_BITMASK + 1; i++) {
1739                int q = netdev_get_prio_tc_map(dev, i);
1740
1741                tc = &dev->tc_to_txq[q];
1742                if (tc->offset + tc->count > txq) {
1743                        pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1744                                i, q);
1745                        netdev_set_prio_tc_map(dev, i, 0);
1746                }
1747        }
1748}
1749
1750/*
1751 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1752 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1753 */
1754int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1755{
1756        int rc;
1757
1758        if (txq < 1 || txq > dev->num_tx_queues)
1759                return -EINVAL;
1760
1761        if (dev->reg_state == NETREG_REGISTERED ||
1762            dev->reg_state == NETREG_UNREGISTERING) {
1763                ASSERT_RTNL();
1764
1765                rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1766                                                  txq);
1767                if (rc)
1768                        return rc;
1769
1770                if (dev->num_tc)
1771                        netif_setup_tc(dev, txq);
1772
1773                if (txq < dev->real_num_tx_queues)
1774                        qdisc_reset_all_tx_gt(dev, txq);
1775        }
1776
1777        dev->real_num_tx_queues = txq;
1778        return 0;
1779}
1780EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1781
1782#ifdef CONFIG_RPS
1783/**
1784 *      netif_set_real_num_rx_queues - set actual number of RX queues used
1785 *      @dev: Network device
1786 *      @rxq: Actual number of RX queues
1787 *
1788 *      This must be called either with the rtnl_lock held or before
1789 *      registration of the net device.  Returns 0 on success, or a
1790 *      negative error code.  If called before registration, it always
1791 *      succeeds.
1792 */
1793int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1794{
1795        int rc;
1796
1797        if (rxq < 1 || rxq > dev->num_rx_queues)
1798                return -EINVAL;
1799
1800        if (dev->reg_state == NETREG_REGISTERED) {
1801                ASSERT_RTNL();
1802
1803                rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1804                                                  rxq);
1805                if (rc)
1806                        return rc;
1807        }
1808
1809        dev->real_num_rx_queues = rxq;
1810        return 0;
1811}
1812EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1813#endif
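
/*
 * Illustrative sketch, not part of this file: a multiqueue driver that
 * reshapes its channels (e.g. from an ethtool set_channels handler, under
 * RTNL) would adjust the visible queue ranges roughly like this, where
 * "new_txq"/"new_rxq" are whatever counts the hardware was reprogrammed to:
 */
static int __maybe_unused example_resize_queues(struct net_device *dev,
                                                unsigned int new_txq,
                                                unsigned int new_rxq)
{
        int err;

        ASSERT_RTNL();          /* required once the device is registered */

        err = netif_set_real_num_tx_queues(dev, new_txq);
        if (err)
                return err;

        return netif_set_real_num_rx_queues(dev, new_rxq);
}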
1814
1815/**
1816 * netif_get_num_default_rss_queues - default number of RSS queues
1817 *
1818 * This routine returns the default upper limit on the number of RSS
1819 * queues used by multiqueue devices.
1820 */
1821int netif_get_num_default_rss_queues(void)
1822{
1823        return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1824}
1825EXPORT_SYMBOL(netif_get_num_default_rss_queues);
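
/*
 * Illustrative sketch, not part of this file: a driver probing hardware that
 * supports "hw_max" queues (an invented parameter) would typically cap its
 * default at the limit suggested above:
 */
static unsigned int __maybe_unused example_default_queues(unsigned int hw_max)
{
        return min_t(unsigned int, hw_max, netif_get_num_default_rss_queues());
}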
1826
1827static inline void __netif_reschedule(struct Qdisc *q)
1828{
1829        struct softnet_data *sd;
1830        unsigned long flags;
1831
1832        local_irq_save(flags);
1833        sd = &__get_cpu_var(softnet_data);
1834        q->next_sched = NULL;
1835        *sd->output_queue_tailp = q;
1836        sd->output_queue_tailp = &q->next_sched;
1837        raise_softirq_irqoff(NET_TX_SOFTIRQ);
1838        local_irq_restore(flags);
1839}
1840
1841void __netif_schedule(struct Qdisc *q)
1842{
1843        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1844                __netif_reschedule(q);
1845}
1846EXPORT_SYMBOL(__netif_schedule);
1847
1848void dev_kfree_skb_irq(struct sk_buff *skb)
1849{
1850        if (atomic_dec_and_test(&skb->users)) {
1851                struct softnet_data *sd;
1852                unsigned long flags;
1853
1854                local_irq_save(flags);
1855                sd = &__get_cpu_var(softnet_data);
1856                skb->next = sd->completion_queue;
1857                sd->completion_queue = skb;
1858                raise_softirq_irqoff(NET_TX_SOFTIRQ);
1859                local_irq_restore(flags);
1860        }
1861}
1862EXPORT_SYMBOL(dev_kfree_skb_irq);
1863
1864void dev_kfree_skb_any(struct sk_buff *skb)
1865{
1866        if (in_irq() || irqs_disabled())
1867                dev_kfree_skb_irq(skb);
1868        else
1869                dev_kfree_skb(skb);
1870}
1871EXPORT_SYMBOL(dev_kfree_skb_any);
1872
1873
1874/**
1875 * netif_device_detach - mark device as removed
1876 * @dev: network device
1877 *
1878 * Mark device as removed from the system and therefore no longer available.
1879 */
1880void netif_device_detach(struct net_device *dev)
1881{
1882        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1883            netif_running(dev)) {
1884                netif_tx_stop_all_queues(dev);
1885        }
1886}
1887EXPORT_SYMBOL(netif_device_detach);
1888
1889/**
1890 * netif_device_attach - mark device as attached
1891 * @dev: network device
1892 *
1893 * Mark device as attached to the system and restart its queues if needed.
1894 */
1895void netif_device_attach(struct net_device *dev)
1896{
1897        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1898            netif_running(dev)) {
1899                netif_tx_wake_all_queues(dev);
1900                __netdev_watchdog_up(dev);
1901        }
1902}
1903EXPORT_SYMBOL(netif_device_attach);
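
/*
 * Illustrative sketch, not part of this file: the usual pairing of the two
 * helpers above is a driver's suspend/resume path:
 */
static int __maybe_unused example_suspend(struct net_device *dev)
{
        netif_device_detach(dev);       /* stops all TX queues if dev was running */
        /* ... quiesce the hardware, save state, enter low-power mode ... */
        return 0;
}

static int __maybe_unused example_resume(struct net_device *dev)
{
        /* ... power up and restore hardware state ... */
        netif_device_attach(dev);       /* restarts queues and the TX watchdog */
        return 0;
}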
1904
1905static void skb_warn_bad_offload(const struct sk_buff *skb)
1906{
1907        static const netdev_features_t null_features = 0;
1908        struct net_device *dev = skb->dev;
1909        const char *driver = "";
1910
1911        if (dev && dev->dev.parent)
1912                driver = dev_driver_string(dev->dev.parent);
1913
1914        WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1915             "gso_type=%d ip_summed=%d\n",
1916             driver, dev ? &dev->features : &null_features,
1917             skb->sk ? &skb->sk->sk_route_caps : &null_features,
1918             skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1919             skb_shinfo(skb)->gso_type, skb->ip_summed);
1920}
1921
1922/*
1923 * Invalidate hardware checksum when packet is to be mangled, and
1924 * complete checksum manually on outgoing path.
1925 */
1926int skb_checksum_help(struct sk_buff *skb)
1927{
1928        __wsum csum;
1929        int ret = 0, offset;
1930
1931        if (skb->ip_summed == CHECKSUM_COMPLETE)
1932                goto out_set_summed;
1933
1934        if (unlikely(skb_shinfo(skb)->gso_size)) {
1935                skb_warn_bad_offload(skb);
1936                return -EINVAL;
1937        }
1938
1939        offset = skb_checksum_start_offset(skb);
1940        BUG_ON(offset >= skb_headlen(skb));
1941        csum = skb_checksum(skb, offset, skb->len - offset, 0);
1942
1943        offset += skb->csum_offset;
1944        BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1945
1946        if (skb_cloned(skb) &&
1947            !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1948                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1949                if (ret)
1950                        goto out;
1951        }
1952
1953        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1954out_set_summed:
1955        skb->ip_summed = CHECKSUM_NONE;
1956out:
1957        return ret;
1958}
1959EXPORT_SYMBOL(skb_checksum_help);
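
/*
 * Illustrative sketch, not part of this file: a driver whose hardware can
 * only checksum IPv4 packets would fall back to skb_checksum_help() in its
 * xmit path for anything else handed to it with CHECKSUM_PARTIAL:
 */
static int __maybe_unused example_tx_csum(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;                       /* nothing to resolve */

        if (skb->protocol == htons(ETH_P_IP))
                return 0;                       /* hypothetical hw covers this */

        /* software fallback: fills in the checksum and clears ip_summed */
        return skb_checksum_help(skb);
}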
1960
1961/**
1962 *      skb_gso_segment - Perform segmentation on skb.
1963 *      @skb: buffer to segment
1964 *      @features: features for the output path (see dev->features)
1965 *
1966 *      This function segments the given skb and returns a list of segments.
1967 *
1968 *      It may return NULL if the skb requires no segmentation.  This is
1969 *      only possible when GSO is used for verifying header integrity.
1970 */
1971struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1972        netdev_features_t features)
1973{
1974        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1975        struct packet_type *ptype;
1976        __be16 type = skb->protocol;
1977        int vlan_depth = ETH_HLEN;
1978        int err;
1979
1980        while (type == htons(ETH_P_8021Q)) {
1981                struct vlan_hdr *vh;
1982
1983                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1984                        return ERR_PTR(-EINVAL);
1985
1986                vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1987                type = vh->h_vlan_encapsulated_proto;
1988                vlan_depth += VLAN_HLEN;
1989        }
1990
1991        skb_reset_mac_header(skb);
1992        skb->mac_len = skb->network_header - skb->mac_header;
1993        __skb_pull(skb, skb->mac_len);
1994
1995        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1996                skb_warn_bad_offload(skb);
1997
1998                if (skb_header_cloned(skb) &&
1999                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2000                        return ERR_PTR(err);
2001        }
2002
2003        rcu_read_lock();
2004        list_for_each_entry_rcu(ptype,
2005                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2006                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2007                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2008                                err = ptype->gso_send_check(skb);
2009                                segs = ERR_PTR(err);
2010                                if (err || skb_gso_ok(skb, features))
2011                                        break;
2012                                __skb_push(skb, (skb->data -
2013                                                 skb_network_header(skb)));
2014                        }
2015                        segs = ptype->gso_segment(skb, features);
2016                        break;
2017                }
2018        }
2019        rcu_read_unlock();
2020
2021        __skb_push(skb, skb->data - skb_mac_header(skb));
2022
2023        return segs;
2024}
2025EXPORT_SYMBOL(skb_gso_segment);
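
/*
 * Illustrative sketch, not part of this file: callers receive the segments
 * as a singly linked list chained through skb->next and walk it one entry
 * at a time, much like the GSO loop in dev_hard_start_xmit() further below:
 */
static void __maybe_unused example_consume_segs(struct sk_buff *segs)
{
        while (segs) {
                struct sk_buff *next = segs->next;

                segs->next = NULL;
                kfree_skb(segs);        /* a real caller would transmit it instead */
                segs = next;
        }
}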
2026
2027/* Take action when hardware reception checksum errors are detected. */
2028#ifdef CONFIG_BUG
2029void netdev_rx_csum_fault(struct net_device *dev)
2030{
2031        if (net_ratelimit()) {
2032                pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2033                dump_stack();
2034        }
2035}
2036EXPORT_SYMBOL(netdev_rx_csum_fault);
2037#endif
2038
2039/* Actually, we should eliminate this check as soon as we know that:
2040 * 1. An IOMMU is present and can map all of the memory.
2041 * 2. No high memory really exists on this machine.
2042 */
2043
2044static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2045{
2046#ifdef CONFIG_HIGHMEM
2047        int i;
2048        if (!(dev->features & NETIF_F_HIGHDMA)) {
2049                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2050                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2051                        if (PageHighMem(skb_frag_page(frag)))
2052                                return 1;
2053                }
2054        }
2055
2056        if (PCI_DMA_BUS_IS_PHYS) {
2057                struct device *pdev = dev->dev.parent;
2058
2059                if (!pdev)
2060                        return 0;
2061                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2062                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2063                        dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2064                        if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2065                                return 1;
2066                }
2067        }
2068#endif
2069        return 0;
2070}
2071
2072struct dev_gso_cb {
2073        void (*destructor)(struct sk_buff *skb);
2074};
2075
2076#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2077
2078static void dev_gso_skb_destructor(struct sk_buff *skb)
2079{
2080        struct dev_gso_cb *cb;
2081
2082        do {
2083                struct sk_buff *nskb = skb->next;
2084
2085                skb->next = nskb->next;
2086                nskb->next = NULL;
2087                kfree_skb(nskb);
2088        } while (skb->next);
2089
2090        cb = DEV_GSO_CB(skb);
2091        if (cb->destructor)
2092                cb->destructor(skb);
2093}
2094
2095/**
2096 *      dev_gso_segment - Perform emulated hardware segmentation on skb.
2097 *      @skb: buffer to segment
2098 *      @features: device features as applicable to this skb
2099 *
2100 *      This function segments the given skb and stores the list of segments
2101 *      in skb->next.
2102 */
2103static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2104{
2105        struct sk_buff *segs;
2106
2107        segs = skb_gso_segment(skb, features);
2108
2109        /* Verifying header integrity only. */
2110        if (!segs)
2111                return 0;
2112
2113        if (IS_ERR(segs))
2114                return PTR_ERR(segs);
2115
2116        skb->next = segs;
2117        DEV_GSO_CB(skb)->destructor = skb->destructor;
2118        skb->destructor = dev_gso_skb_destructor;
2119
2120        return 0;
2121}
2122
2123static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2124{
2125        return ((features & NETIF_F_GEN_CSUM) ||
2126                ((features & NETIF_F_V4_CSUM) &&
2127                 protocol == htons(ETH_P_IP)) ||
2128                ((features & NETIF_F_V6_CSUM) &&
2129                 protocol == htons(ETH_P_IPV6)) ||
2130                ((features & NETIF_F_FCOE_CRC) &&
2131                 protocol == htons(ETH_P_FCOE)));
2132}
2133
2134static netdev_features_t harmonize_features(struct sk_buff *skb,
2135        __be16 protocol, netdev_features_t features)
2136{
2137        if (skb->ip_summed != CHECKSUM_NONE &&
2138            !can_checksum_protocol(features, protocol)) {
2139                features &= ~NETIF_F_ALL_CSUM;
2140                features &= ~NETIF_F_SG;
2141        } else if (illegal_highdma(skb->dev, skb)) {
2142                features &= ~NETIF_F_SG;
2143        }
2144
2145        return features;
2146}
2147
2148netdev_features_t netif_skb_features(struct sk_buff *skb)
2149{
2150        __be16 protocol = skb->protocol;
2151        netdev_features_t features = skb->dev->features;
2152
2153        if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2154                features &= ~NETIF_F_GSO_MASK;
2155
2156        if (protocol == htons(ETH_P_8021Q)) {
2157                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2158                protocol = veh->h_vlan_encapsulated_proto;
2159        } else if (!vlan_tx_tag_present(skb)) {
2160                return harmonize_features(skb, protocol, features);
2161        }
2162
2163        features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2164
2165        if (protocol != htons(ETH_P_8021Q)) {
2166                return harmonize_features(skb, protocol, features);
2167        } else {
2168                features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2169                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2170                return harmonize_features(skb, protocol, features);
2171        }
2172}
2173EXPORT_SYMBOL(netif_skb_features);
2174
2175/*
2176 * Returns true if either:
2177 *      1. skb has frag_list and the device doesn't support FRAGLIST, or
2178 *      2. skb is fragmented and the device does not support SG, or
2179 *         at least one of the fragments is in highmem and the device
2180 *         does not support DMA from it.
2181 */
2182static inline int skb_needs_linearize(struct sk_buff *skb,
2183                                      int features)
2184{
2185        return skb_is_nonlinear(skb) &&
2186                        ((skb_has_frag_list(skb) &&
2187                                !(features & NETIF_F_FRAGLIST)) ||
2188                        (skb_shinfo(skb)->nr_frags &&
2189                                !(features & NETIF_F_SG)));
2190}
2191
2192int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2193                        struct netdev_queue *txq)
2194{
2195        const struct net_device_ops *ops = dev->netdev_ops;
2196        int rc = NETDEV_TX_OK;
2197        unsigned int skb_len;
2198
2199        if (likely(!skb->next)) {
2200                netdev_features_t features;
2201
2202                /*
2203                 * If the device doesn't need skb->dst, release it right now while
2204                 * it's still hot in this CPU's cache
2205                 */
2206                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2207                        skb_dst_drop(skb);
2208
2209                if (!list_empty(&ptype_all))
2210                        dev_queue_xmit_nit(skb, dev);
2211
2212                features = netif_skb_features(skb);
2213
2214                if (vlan_tx_tag_present(skb) &&
2215                    !(features & NETIF_F_HW_VLAN_TX)) {
2216                        skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2217                        if (unlikely(!skb))
2218                                goto out;
2219
2220                        skb->vlan_tci = 0;
2221                }
2222
2223                if (netif_needs_gso(skb, features)) {
2224                        if (unlikely(dev_gso_segment(skb, features)))
2225                                goto out_kfree_skb;
2226                        if (skb->next)
2227                                goto gso;
2228                } else {
2229                        if (skb_needs_linearize(skb, features) &&
2230                            __skb_linearize(skb))
2231                                goto out_kfree_skb;
2232
2233                        /* If packet is not checksummed and device does not
2234                         * support checksumming for this protocol, complete
2235                         * checksumming here.
2236                         */
2237                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2238                                skb_set_transport_header(skb,
2239                                        skb_checksum_start_offset(skb));
2240                                if (!(features & NETIF_F_ALL_CSUM) &&
2241                                     skb_checksum_help(skb))
2242                                        goto out_kfree_skb;
2243                        }
2244                }
2245
2246                skb_len = skb->len;
2247                rc = ops->ndo_start_xmit(skb, dev);
2248                trace_net_dev_xmit(skb, rc, dev, skb_len);
2249                if (rc == NETDEV_TX_OK)
2250                        txq_trans_update(txq);
2251                return rc;
2252        }
2253
2254gso:
2255        do {
2256                struct sk_buff *nskb = skb->next;
2257
2258                skb->next = nskb->next;
2259                nskb->next = NULL;
2260
2261                /*
2262                 * If the device doesn't need nskb->dst, release it right now while
2263                 * it's still hot in this CPU's cache
2264                 */
2265                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2266                        skb_dst_drop(nskb);
2267
2268                skb_len = nskb->len;
2269                rc = ops->ndo_start_xmit(nskb, dev);
2270                trace_net_dev_xmit(nskb, rc, dev, skb_len);
2271                if (unlikely(rc != NETDEV_TX_OK)) {
2272                        if (rc & ~NETDEV_TX_MASK)
2273                                goto out_kfree_gso_skb;
2274                        nskb->next = skb->next;
2275                        skb->next = nskb;
2276                        return rc;
2277                }
2278                txq_trans_update(txq);
2279                if (unlikely(netif_xmit_stopped(txq) && skb->next))
2280                        return NETDEV_TX_BUSY;
2281        } while (skb->next);
2282
2283out_kfree_gso_skb:
2284        if (likely(skb->next == NULL))
2285                skb->destructor = DEV_GSO_CB(skb)->destructor;
2286out_kfree_skb:
2287        kfree_skb(skb);
2288out:
2289        return rc;
2290}
2291
2292static u32 hashrnd __read_mostly;
2293
2294/*
2295 * Returns a Tx hash based on the given packet descriptor and a Tx queue
2296 * count to be used as a distribution range.
2297 */
2298u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2299                  unsigned int num_tx_queues)
2300{
2301        u32 hash;
2302        u16 qoffset = 0;
2303        u16 qcount = num_tx_queues;
2304
2305        if (skb_rx_queue_recorded(skb)) {
2306                hash = skb_get_rx_queue(skb);
2307                while (unlikely(hash >= num_tx_queues))
2308                        hash -= num_tx_queues;
2309                return hash;
2310        }
2311
2312        if (dev->num_tc) {
2313                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2314                qoffset = dev->tc_to_txq[tc].offset;
2315                qcount = dev->tc_to_txq[tc].count;
2316        }
2317
2318        if (skb->sk && skb->sk->sk_hash)
2319                hash = skb->sk->sk_hash;
2320        else
2321                hash = (__force u16) skb->protocol;
2322        hash = jhash_1word(hash, hashrnd);
2323
2324        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2325}
2326EXPORT_SYMBOL(__skb_tx_hash);
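
/*
 * Note on the final line above: "((u64) hash * qcount) >> 32" scales a
 * 32-bit hash onto [0, qcount) without a division.  For example, with
 * qcount = 4 and hash = 0x80000000 (the middle of the 32-bit range),
 * (0x80000000ULL * 4) >> 32 == 2, i.e. the third queue of the range
 * once qoffset is added.
 */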
2327
2328static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2329{
2330        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2331                net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2332                                     dev->name, queue_index,
2333                                     dev->real_num_tx_queues);
2334                return 0;
2335        }
2336        return queue_index;
2337}
2338
2339static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2340{
2341#ifdef CONFIG_XPS
2342        struct xps_dev_maps *dev_maps;
2343        struct xps_map *map;
2344        int queue_index = -1;
2345
2346        rcu_read_lock();
2347        dev_maps = rcu_dereference(dev->xps_maps);
2348        if (dev_maps) {
2349                map = rcu_dereference(
2350                    dev_maps->cpu_map[raw_smp_processor_id()]);
2351                if (map) {
2352                        if (map->len == 1)
2353                                queue_index = map->queues[0];
2354                        else {
2355                                u32 hash;
2356                                if (skb->sk && skb->sk->sk_hash)
2357                                        hash = skb->sk->sk_hash;
2358                                else
2359                                        hash = (__force u16) skb->protocol ^
2360                                            skb->rxhash;
2361                                hash = jhash_1word(hash, hashrnd);
2362                                queue_index = map->queues[
2363                                    ((u64)hash * map->len) >> 32];
2364                        }
2365                        if (unlikely(queue_index >= dev->real_num_tx_queues))
2366                                queue_index = -1;
2367                }
2368        }
2369        rcu_read_unlock();
2370
2371        return queue_index;
2372#else
2373        return -1;
2374#endif
2375}
2376
2377static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2378                                        struct sk_buff *skb)
2379{
2380        int queue_index;
2381        const struct net_device_ops *ops = dev->netdev_ops;
2382
2383        if (dev->real_num_tx_queues == 1)
2384                queue_index = 0;
2385        else if (ops->ndo_select_queue) {
2386                queue_index = ops->ndo_select_queue(dev, skb);
2387                queue_index = dev_cap_txqueue(dev, queue_index);
2388        } else {
2389                struct sock *sk = skb->sk;
2390                queue_index = sk_tx_queue_get(sk);
2391
2392                if (queue_index < 0 || skb->ooo_okay ||
2393                    queue_index >= dev->real_num_tx_queues) {
2394                        int old_index = queue_index;
2395
2396                        queue_index = get_xps_queue(dev, skb);
2397                        if (queue_index < 0)
2398                                queue_index = skb_tx_hash(dev, skb);
2399
2400                        if (queue_index != old_index && sk) {
2401                                struct dst_entry *dst =
2402                                    rcu_dereference_check(sk->sk_dst_cache, 1);
2403
2404                                if (dst && skb_dst(skb) == dst)
2405                                        sk_tx_queue_set(sk, queue_index);
2406                        }
2407                }
2408        }
2409
2410        skb_set_queue_mapping(skb, queue_index);
2411        return netdev_get_tx_queue(dev, queue_index);
2412}
2413
2414static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2415                                 struct net_device *dev,
2416                                 struct netdev_queue *txq)
2417{
2418        spinlock_t *root_lock = qdisc_lock(q);
2419        bool contended;
2420        int rc;
2421
2422        qdisc_skb_cb(skb)->pkt_len = skb->len;
2423        qdisc_calculate_pkt_len(skb, q);
2424        /*
2425         * Heuristic to force contended enqueues to serialize on a
2426         * separate lock before trying to get qdisc main lock.
2427         * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2428         * and dequeue packets faster.
2429         */
2430        contended = qdisc_is_running(q);
2431        if (unlikely(contended))
2432                spin_lock(&q->busylock);
2433
2434        spin_lock(root_lock);
2435        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2436                kfree_skb(skb);
2437                rc = NET_XMIT_DROP;
2438        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2439                   qdisc_run_begin(q)) {
2440                /*
2441                 * This is a work-conserving queue; there are no old skbs
2442                 * waiting to be sent out; and the qdisc is not running -
2443                 * xmit the skb directly.
2444                 */
2445                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2446                        skb_dst_force(skb);
2447
2448                qdisc_bstats_update(q, skb);
2449
2450                if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2451                        if (unlikely(contended)) {
2452                                spin_unlock(&q->busylock);
2453                                contended = false;
2454                        }
2455                        __qdisc_run(q);
2456                } else
2457                        qdisc_run_end(q);
2458
2459                rc = NET_XMIT_SUCCESS;
2460        } else {
2461                skb_dst_force(skb);
2462                rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2463                if (qdisc_run_begin(q)) {
2464                        if (unlikely(contended)) {
2465                                spin_unlock(&q->busylock);
2466                                contended = false;
2467                        }
2468                        __qdisc_run(q);
2469                }
2470        }
2471        spin_unlock(root_lock);
2472        if (unlikely(contended))
2473                spin_unlock(&q->busylock);
2474        return rc;
2475}
2476
2477#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2478static void skb_update_prio(struct sk_buff *skb)
2479{
2480        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2481
2482        if (!skb->priority && skb->sk && map) {
2483                unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2484
2485                if (prioidx < map->priomap_len)
2486                        skb->priority = map->priomap[prioidx];
2487        }
2488}
2489#else
2490#define skb_update_prio(skb)
2491#endif
2492
2493static DEFINE_PER_CPU(int, xmit_recursion);
2494#define RECURSION_LIMIT 10
2495
2496/**
2497 *      dev_loopback_xmit - loop back @skb
2498 *      @skb: buffer to transmit
2499 */
2500int dev_loopback_xmit(struct sk_buff *skb)
2501{
2502        skb_reset_mac_header(skb);
2503        __skb_pull(skb, skb_network_offset(skb));
2504        skb->pkt_type = PACKET_LOOPBACK;
2505        skb->ip_summed = CHECKSUM_UNNECESSARY;
2506        WARN_ON(!skb_dst(skb));
2507        skb_dst_force(skb);
2508        netif_rx_ni(skb);
2509        return 0;
2510}
2511EXPORT_SYMBOL(dev_loopback_xmit);
2512
2513/**
2514 *      dev_queue_xmit - transmit a buffer
2515 *      @skb: buffer to transmit
2516 *
2517 *      Queue a buffer for transmission to a network device. The caller must
2518 *      have set the device and priority and built the buffer before calling
2519 *      this function. The function can be called from an interrupt.
2520 *
2521 *      A negative errno code is returned on a failure. A success does not
2522 *      guarantee the frame will be transmitted as it may be dropped due
2523 *      to congestion or traffic shaping.
2524 *
2525 * -----------------------------------------------------------------------------------
2526 *      I notice this method can also return errors from the queue disciplines,
2527 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2528 *      be positive.
2529 *
2530 *      Regardless of the return value, the skb is consumed, so it is currently
2531 *      difficult to retry a send to this method.  (You can bump the ref count
2532 *      before sending to hold a reference for retry if you are careful.)
2533 *
2534 *      When calling this method, interrupts MUST be enabled.  This is because
2535 *      the BH enable code must have IRQs enabled so that it will not deadlock.
2536 *          --BLG
2537 */
2538int dev_queue_xmit(struct sk_buff *skb)
2539{
2540        struct net_device *dev = skb->dev;
2541        struct netdev_queue *txq;
2542        struct Qdisc *q;
2543        int rc = -ENOMEM;
2544
2545        /* Disable soft irqs for various locks below. Also
2546         * stops preemption for RCU.
2547         */
2548        rcu_read_lock_bh();
2549
2550        skb_update_prio(skb);
2551
2552        txq = dev_pick_tx(dev, skb);
2553        q = rcu_dereference_bh(txq->qdisc);
2554
2555#ifdef CONFIG_NET_CLS_ACT
2556        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2557#endif
2558        trace_net_dev_queue(skb);
2559        if (q->enqueue) {
2560                rc = __dev_xmit_skb(skb, q, dev, txq);
2561                goto out;
2562        }
2563
2564        /* The device has no queue. This is the common case for software
2565           devices: loopback and all sorts of tunnels...
2566
2567           Really, it is unlikely that netif_tx_lock protection is necessary
2568           here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
2569           counters.)
2570           However, it is possible that they rely on the protection
2571           provided here.
2572
2573           Check this and take the lock; it is not prone to deadlocks.
2574           Or get rid of the noqueue qdisc entirely, which is even simpler 8)
2575         */
2576        if (dev->flags & IFF_UP) {
2577                int cpu = smp_processor_id(); /* ok because BHs are off */
2578
2579                if (txq->xmit_lock_owner != cpu) {
2580
2581                        if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2582                                goto recursion_alert;
2583
2584                        HARD_TX_LOCK(dev, txq, cpu);
2585
2586                        if (!netif_xmit_stopped(txq)) {
2587                                __this_cpu_inc(xmit_recursion);
2588                                rc = dev_hard_start_xmit(skb, dev, txq);
2589                                __this_cpu_dec(xmit_recursion);
2590                                if (dev_xmit_complete(rc)) {
2591                                        HARD_TX_UNLOCK(dev, txq);
2592                                        goto out;
2593                                }
2594                        }
2595                        HARD_TX_UNLOCK(dev, txq);
2596                        net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2597                                             dev->name);
2598                } else {
2599                        /* Recursion is detected! It is possible,
2600                         * unfortunately
2601                         */
2602recursion_alert:
2603                        net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2604                                             dev->name);
2605                }
2606        }
2607
2608        rc = -ENETDOWN;
2609        rcu_read_unlock_bh();
2610
2611        kfree_skb(skb);
2612        return rc;
2613out:
2614        rcu_read_unlock_bh();
2615        return rc;
2616}
2617EXPORT_SYMBOL(dev_queue_xmit);
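
/*
 * Illustrative sketch, not part of this file: a caller injecting a fully
 * built link-layer frame (an Ethernet header followed by its payload, in
 * "frame"/"len") would hand it to dev_queue_xmit() like this:
 */
static int __maybe_unused example_xmit_frame(struct net_device *dev,
                                             const void *frame,
                                             unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), frame, len);
        skb_reset_mac_header(skb);
        skb->dev = dev;
        skb->protocol = eth_hdr(skb)->h_proto;

        /* may return positive NET_XMIT_* codes as well, see above */
        return dev_queue_xmit(skb);
}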
2618
2619
2620/*=======================================================================
2621                        Receiver routines
2622  =======================================================================*/
2623
2624int netdev_max_backlog __read_mostly = 1000;
2625int netdev_tstamp_prequeue __read_mostly = 1;
2626int netdev_budget __read_mostly = 300;
2627int weight_p __read_mostly = 64;            /* old backlog weight */
2628
2629/* Called with irq disabled */
2630static inline void ____napi_schedule(struct softnet_data *sd,
2631                                     struct napi_struct *napi)
2632{
2633        list_add_tail(&napi->poll_list, &sd->poll_list);
2634        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2635}
2636
2637/*
2638 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2639 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
2640 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
2641 * if hash is a canonical 4-tuple hash over transport ports.
2642 */
2643void __skb_get_rxhash(struct sk_buff *skb)
2644{
2645        struct flow_keys keys;
2646        u32 hash;
2647
2648        if (!skb_flow_dissect(skb, &keys))
2649                return;
2650
2651        if (keys.ports)
2652                skb->l4_rxhash = 1;
2653
2654        /* get a consistent hash (same value on both flow directions) */
2655        if (((__force u32)keys.dst < (__force u32)keys.src) ||
2656            (((__force u32)keys.dst == (__force u32)keys.src) &&
2657             ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2658                swap(keys.dst, keys.src);
2659                swap(keys.port16[0], keys.port16[1]);
2660        }
2661
2662        hash = jhash_3words((__force u32)keys.dst,
2663                            (__force u32)keys.src,
2664                            (__force u32)keys.ports, hashrnd);
2665        if (!hash)
2666                hash = 1;
2667
2668        skb->rxhash = hash;
2669}
2670EXPORT_SYMBOL(__skb_get_rxhash);
2671
2672#ifdef CONFIG_RPS
2673
2674/* One global table that all flow-based protocols share. */
2675struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2676EXPORT_SYMBOL(rps_sock_flow_table);
2677
2678struct static_key rps_needed __read_mostly;
2679
2680static struct rps_dev_flow *
2681set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2682            struct rps_dev_flow *rflow, u16 next_cpu)
2683{
2684        if (next_cpu != RPS_NO_CPU) {
2685#ifdef CONFIG_RFS_ACCEL
2686                struct netdev_rx_queue *rxqueue;
2687                struct rps_dev_flow_table *flow_table;
2688                struct rps_dev_flow *old_rflow;
2689                u32 flow_id;
2690                u16 rxq_index;
2691                int rc;
2692
2693                /* Should we steer this flow to a different hardware queue? */
2694                if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2695                    !(dev->features & NETIF_F_NTUPLE))
2696                        goto out;
2697                rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2698                if (rxq_index == skb_get_rx_queue(skb))
2699                        goto out;
2700
2701                rxqueue = dev->_rx + rxq_index;
2702                flow_table = rcu_dereference(rxqueue->rps_flow_table);
2703                if (!flow_table)
2704                        goto out;
2705                flow_id = skb->rxhash & flow_table->mask;
2706                rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2707                                                        rxq_index, flow_id);
2708                if (rc < 0)
2709                        goto out;
2710                old_rflow = rflow;
2711                rflow = &flow_table->flows[flow_id];
2712                rflow->filter = rc;
2713                if (old_rflow->filter == rflow->filter)
2714                        old_rflow->filter = RPS_NO_FILTER;
2715        out:
2716#endif
2717                rflow->last_qtail =
2718                        per_cpu(softnet_data, next_cpu).input_queue_head;
2719        }
2720
2721        rflow->cpu = next_cpu;
2722        return rflow;
2723}
2724
2725/*
2726 * get_rps_cpu is called from netif_receive_skb and returns the target
2727 * CPU from the RPS map of the receiving queue for a given skb.
2728 * rcu_read_lock must be held on entry.
2729 */
2730static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2731                       struct rps_dev_flow **rflowp)
2732{
2733        struct netdev_rx_queue *rxqueue;
2734        struct rps_map *map;
2735        struct rps_dev_flow_table *flow_table;
2736        struct rps_sock_flow_table *sock_flow_table;
2737        int cpu = -1;
2738        u16 tcpu;
2739
2740        if (skb_rx_queue_recorded(skb)) {
2741                u16 index = skb_get_rx_queue(skb);
2742                if (unlikely(index >= dev->real_num_rx_queues)) {
2743                        WARN_ONCE(dev->real_num_rx_queues > 1,
2744                                  "%s received packet on queue %u, but number "
2745                                  "of RX queues is %u\n",
2746                                  dev->name, index, dev->real_num_rx_queues);
2747                        goto done;
2748                }
2749                rxqueue = dev->_rx + index;
2750        } else
2751                rxqueue = dev->_rx;
2752
2753        map = rcu_dereference(rxqueue->rps_map);
2754        if (map) {
2755                if (map->len == 1 &&
2756                    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2757                        tcpu = map->cpus[0];
2758                        if (cpu_online(tcpu))
2759                                cpu = tcpu;
2760                        goto done;
2761                }
2762        } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2763                goto done;
2764        }
2765
2766        skb_reset_network_header(skb);
2767        if (!skb_get_rxhash(skb))
2768                goto done;
2769
2770        flow_table = rcu_dereference(rxqueue->rps_flow_table);
2771        sock_flow_table = rcu_dereference(rps_sock_flow_table);
2772        if (flow_table && sock_flow_table) {
2773                u16 next_cpu;
2774                struct rps_dev_flow *rflow;
2775
2776                rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2777                tcpu = rflow->cpu;
2778
2779                next_cpu = sock_flow_table->ents[skb->rxhash &
2780                    sock_flow_table->mask];
2781
2782                /*
2783                 * If the desired CPU (where last recvmsg was done) is
2784                 * different from current CPU (one in the rx-queue flow
2785                 * table entry), switch if one of the following holds:
2786                 *   - Current CPU is unset (equal to RPS_NO_CPU).
2787                 *   - Current CPU is offline.
2788                 *   - The current CPU's queue tail has advanced beyond the
2789                 *     last packet that was enqueued using this table entry.
2790                 *     This guarantees that all previous packets for the flow
2791                 *     have been dequeued, thus preserving in order delivery.
2792                 */
2793                if (unlikely(tcpu != next_cpu) &&
2794                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2795                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2796                      rflow->last_qtail)) >= 0)) {
2797                        tcpu = next_cpu;
2798                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2799                }
2800
2801                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2802                        *rflowp = rflow;
2803                        cpu = tcpu;
2804                        goto done;
2805                }
2806        }
2807
2808        if (map) {
2809                tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2810
2811                if (cpu_online(tcpu)) {
2812                        cpu = tcpu;
2813                        goto done;
2814                }
2815        }
2816
2817done:
2818        return cpu;
2819}
2820
2821#ifdef CONFIG_RFS_ACCEL
2822
2823/**
2824 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2825 * @dev: Device on which the filter was set
2826 * @rxq_index: RX queue index
2827 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2828 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2829 *
2830 * Drivers that implement ndo_rx_flow_steer() should periodically call
2831 * this function for each installed filter and remove the filters for
2832 * which it returns %true.
2833 */
2834bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2835                         u32 flow_id, u16 filter_id)
2836{
2837        struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2838        struct rps_dev_flow_table *flow_table;
2839        struct rps_dev_flow *rflow;
2840        bool expire = true;
2841        int cpu;
2842
2843        rcu_read_lock();
2844        flow_table = rcu_dereference(rxqueue->rps_flow_table);
2845        if (flow_table && flow_id <= flow_table->mask) {
2846                rflow = &flow_table->flows[flow_id];
2847                cpu = ACCESS_ONCE(rflow->cpu);
2848                if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2849                    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2850                           rflow->last_qtail) <
2851                     (int)(10 * flow_table->mask)))
2852                        expire = false;
2853        }
2854        rcu_read_unlock();
2855        return expire;
2856}
2857EXPORT_SYMBOL(rps_may_expire_flow);
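
/*
 * Illustrative sketch, not part of this file: a driver implementing
 * ndo_rx_flow_steer() typically keeps a table of installed filters (the
 * struct below is invented for the example) and runs a periodic scan like
 * this, tearing down the hardware rule whenever expiry is allowed:
 */
struct example_arfs_filter {
        bool    active;
        u16     rxq;            /* rxq_index passed to ndo_rx_flow_steer() */
        u32     flow_id;        /* flow_id passed to ndo_rx_flow_steer() */
        u16     id;             /* filter ID that ndo_rx_flow_steer() returned */
};

static void __maybe_unused example_expire_filters(struct net_device *dev,
                                                  struct example_arfs_filter *tbl,
                                                  unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].active &&
                    rps_may_expire_flow(dev, tbl[i].rxq,
                                        tbl[i].flow_id, tbl[i].id)) {
                        /* remove the hardware steering rule here */
                        tbl[i].active = false;
                }
        }
}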
2858
2859#endif /* CONFIG_RFS_ACCEL */
2860
2861/* Called from hardirq (IPI) context */
2862static void rps_trigger_softirq(void *data)
2863{
2864        struct softnet_data *sd = data;
2865
2866        ____napi_schedule(sd, &sd->backlog);
2867        sd->received_rps++;
2868}
2869
2870#endif /* CONFIG_RPS */
2871
2872/*
2873 * Check if this softnet_data structure belongs to another CPU.
2874 * If yes, queue it to our IPI list and return 1.
2875 * If no, return 0.
2876 */
2877static int rps_ipi_queued(struct softnet_data *sd)
2878{
2879#ifdef CONFIG_RPS
2880        struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2881
2882        if (sd != mysd) {
2883                sd->rps_ipi_next = mysd->rps_ipi_list;
2884                mysd->rps_ipi_list = sd;
2885
2886                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2887                return 1;
2888        }
2889#endif /* CONFIG_RPS */
2890        return 0;
2891}
2892
2893/*
2894 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2895 * queue (may be a remote CPU queue).
2896 */
2897static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2898                              unsigned int *qtail)
2899{
2900        struct softnet_data *sd;
2901        unsigned long flags;
2902
2903        sd = &per_cpu(softnet_data, cpu);
2904
2905        local_irq_save(flags);
2906
2907        rps_lock(sd);
2908        if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2909                if (skb_queue_len(&sd->input_pkt_queue)) {
2910enqueue:
2911                        __skb_queue_tail(&sd->input_pkt_queue, skb);
2912                        input_queue_tail_incr_save(sd, qtail);
2913                        rps_unlock(sd);
2914                        local_irq_restore(flags);
2915                        return NET_RX_SUCCESS;
2916                }
2917
2918                /* Schedule NAPI for the backlog device.
2919                 * We can use a non-atomic operation since we own the queue lock.
2920                 */
2921                if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2922                        if (!rps_ipi_queued(sd))
2923                                ____napi_schedule(sd, &sd->backlog);
2924                }
2925                goto enqueue;
2926        }
2927
2928        sd->dropped++;
2929        rps_unlock(sd);
2930
2931        local_irq_restore(flags);
2932
2933        atomic_long_inc(&skb->dev->rx_dropped);
2934        kfree_skb(skb);
2935        return NET_RX_DROP;
2936}
2937
2938/**
2939 *      netif_rx        -       post buffer to the network code
2940 *      @skb: buffer to post
2941 *
2942 *      This function receives a packet from a device driver and queues it for
2943 *      the upper (protocol) levels to process.  It always succeeds. The buffer
2944 *      may be dropped during processing for congestion control or by the
2945 *      protocol layers.
2946 *
2947 *      return values:
2948 *      NET_RX_SUCCESS  (no congestion)
2949 *      NET_RX_DROP     (packet was dropped)
2950 *
2951 */
2952
2953int netif_rx(struct sk_buff *skb)
2954{
2955        int ret;
2956
2957        /* if netpoll wants it, pretend we never saw it */
2958        if (netpoll_rx(skb))
2959                return NET_RX_DROP;
2960
2961        net_timestamp_check(netdev_tstamp_prequeue, skb);
2962
2963        trace_netif_rx(skb);
2964#ifdef CONFIG_RPS
2965        if (static_key_false(&rps_needed)) {
2966                struct rps_dev_flow voidflow, *rflow = &voidflow;
2967                int cpu;
2968
2969                preempt_disable();
2970                rcu_read_lock();
2971
2972                cpu = get_rps_cpu(skb->dev, skb, &rflow);
2973                if (cpu < 0)
2974                        cpu = smp_processor_id();
2975
2976                ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2977
2978                rcu_read_unlock();
2979                preempt_enable();
2980        } else
2981#endif
2982        {
2983                unsigned int qtail;
2984                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2985                put_cpu();
2986        }
2987        return ret;
2988}
2989EXPORT_SYMBOL(netif_rx);
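
/*
 * Illustrative sketch, not part of this file: a non-NAPI Ethernet driver
 * would typically post a received frame (assumed to start with its Ethernet
 * header) from its interrupt handler roughly like this:
 */
static void __maybe_unused example_rx_packet(struct net_device *dev,
                                             const void *data,
                                             unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len);

        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }

        memcpy(skb_put(skb, len), data, len);
        skb->protocol = eth_type_trans(skb, dev);       /* also sets skb->dev */

        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
        netif_rx(skb);          /* always consumes the skb, even on drop */
}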
2990
2991int netif_rx_ni(struct sk_buff *skb)
2992{
2993        int err;
2994
2995        preempt_disable();
2996        err = netif_rx(skb);
2997        if (local_softirq_pending())
2998                do_softirq();
2999        preempt_enable();
3000
3001        return err;
3002}
3003EXPORT_SYMBOL(netif_rx_ni);
3004
3005static void net_tx_action(struct softirq_action *h)
3006{
3007        struct softnet_data *sd = &__get_cpu_var(softnet_data);
3008
3009        if (sd->completion_queue) {
3010                struct sk_buff *clist;
3011
3012                local_irq_disable();
3013                clist = sd->completion_queue;
3014                sd->completion_queue = NULL;
3015                local_irq_enable();
3016
3017                while (clist) {
3018                        struct sk_buff *skb = clist;
3019                        clist = clist->next;
3020
3021                        WARN_ON(atomic_read(&skb->users));
3022                        trace_kfree_skb(skb, net_tx_action);
3023                        __kfree_skb(skb);
3024                }
3025        }
3026
3027        if (sd->output_queue) {
3028                struct Qdisc *head;
3029
3030                local_irq_disable();
3031                head = sd->output_queue;
3032                sd->output_queue = NULL;
3033                sd->output_queue_tailp = &sd->output_queue;
3034                local_irq_enable();
3035
3036                while (head) {
3037                        struct Qdisc *q = head;
3038                        spinlock_t *root_lock;
3039
3040                        head = head->next_sched;
3041
3042                        root_lock = qdisc_lock(q);
3043                        if (spin_trylock(root_lock)) {
3044                                smp_mb__before_clear_bit();
3045                                clear_bit(__QDISC_STATE_SCHED,
3046                                          &q->state);
3047                                qdisc_run(q);
3048                                spin_unlock(root_lock);
3049                        } else {
3050                                if (!test_bit(__QDISC_STATE_DEACTIVATED,
3051                                              &q->state)) {
3052                                        __netif_reschedule(q);
3053                                } else {
3054                                        smp_mb__before_clear_bit();
3055                                        clear_bit(__QDISC_STATE_SCHED,
3056                                                  &q->state);
3057                                }
3058                        }
3059                }
3060        }
3061}
3062
3063#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3064    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3065/* This hook is defined here for ATM LANE */
3066int (*br_fdb_test_addr_hook)(struct net_device *dev,
3067                             unsigned char *addr) __read_mostly;
3068EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3069#endif
3070
3071#ifdef CONFIG_NET_CLS_ACT
3072/* TODO: Maybe we should just force sch_ingress to be compiled in
3073 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for a useless compare
3074 * and two extra stores right now if the ingress scheduler is not
3075 * built in but CONFIG_NET_CLS_ACT is.
3076 * NOTE: This doesn't stop any functionality; if you don't have
3077 * the ingress scheduler, you just can't add policies on ingress.
3078 *
3079 */
3080static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3081{
3082        struct net_device *dev = skb->dev;
3083        u32 ttl = G_TC_RTTL(skb->tc_verd);
3084        int result = TC_ACT_OK;
3085        struct Qdisc *q;
3086
3087        if (unlikely(MAX_RED_LOOP < ttl++)) {
3088                net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3089                                     skb->skb_iif, dev->ifindex);
3090                return TC_ACT_SHOT;
3091        }
3092
3093        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3094        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3095
3096        q = rxq->qdisc;
3097        if (q != &noop_qdisc) {
3098                spin_lock(qdisc_lock(q));
3099                if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3100                        result = qdisc_enqueue_root(skb, q);
3101                spin_unlock(qdisc_lock(q));
3102        }
3103
3104        return result;
3105}
3106
3107static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3108                                         struct packet_type **pt_prev,
3109                                         int *ret, struct net_device *orig_dev)
3110{
3111        struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3112
3113        if (!rxq || rxq->qdisc == &noop_qdisc)
3114                goto out;
3115
3116        if (*pt_prev) {
3117                *ret = deliver_skb(skb, *pt_prev, orig_dev);
3118                *pt_prev = NULL;
3119        }
3120
3121        switch (ing_filter(skb, rxq)) {
3122        case TC_ACT_SHOT:
3123        case TC_ACT_STOLEN:
3124                kfree_skb(skb);
3125                return NULL;
3126        }
3127
3128out:
3129        skb->tc_verd = 0;
3130        return skb;
3131}
3132#endif
3133
3134/**
3135 *      netdev_rx_handler_register - register receive handler
3136 *      @dev: device to register a handler for
3137 *      @rx_handler: receive handler to register
3138 *      @rx_handler_data: data pointer that is used by rx handler
3139 *
3140 *      Register a receive handler for a device. This handler will then be
3141 *      called from __netif_receive_skb. A negative errno code is returned
3142 *      on a failure.
3143 *
3144 *      The caller must hold the rtnl_mutex.
3145 *
3146 *      For a general description of rx_handler, see enum rx_handler_result.
3147 */
3148int netdev_rx_handler_register(struct net_device *dev,
3149                               rx_handler_func_t *rx_handler,
3150                               void *rx_handler_data)
3151{
3152        ASSERT_RTNL();
3153
3154        if (dev->rx_handler)
3155                return -EBUSY;
3156
3157        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3158        rcu_assign_pointer(dev->rx_handler, rx_handler);
3159
3160        return 0;
3161}
3162EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
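
/*
 * Example (illustrative sketch, not part of this file): a bridge- or
 * bonding-like layer claims a port device by registering an rx_handler
 * under RTNL; rx_handler_data is later retrieved under RCU in the handler.
 * The mylayer_* names are hypothetical.
 *
 *      static rx_handler_result_t mylayer_handle_frame(struct sk_buff **pskb)
 *      {
 *              struct sk_buff *skb = *pskb;
 *              struct mylayer_port *port =
 *                      rcu_dereference(skb->dev->rx_handler_data);
 *
 *              if (mylayer_consume(port, skb))
 *                      return RX_HANDLER_CONSUMED;     // skb taken over
 *              return RX_HANDLER_PASS;                 // normal delivery continues
 *      }
 *
 *      rtnl_lock();
 *      err = netdev_rx_handler_register(dev, mylayer_handle_frame, port);
 *      rtnl_unlock();
 */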
3163
3164/**
3165 *      netdev_rx_handler_unregister - unregister receive handler
3166 *      @dev: device to unregister a handler from
3167 *
3168 *      Unregister a receive handler from a device.
3169 *
3170 *      The caller must hold the rtnl_mutex.
3171 */
3172void netdev_rx_handler_unregister(struct net_device *dev)
3173{
3174
3175        ASSERT_RTNL();
3176        RCU_INIT_POINTER(dev->rx_handler, NULL);
3177        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3178}
3179EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3180
3181/*
3182 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3183 * the special handling of PFMEMALLOC skbs.
3184 */
3185static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3186{
3187        switch (skb->protocol) {
3188        case __constant_htons(ETH_P_ARP):
3189        case __constant_htons(ETH_P_IP):
3190        case __constant_htons(ETH_P_IPV6):
3191        case __constant_htons(ETH_P_8021Q):
3192                return true;
3193        default:
3194                return false;
3195        }
3196}
3197
3198static int __netif_receive_skb(struct sk_buff *skb)
3199{
3200        struct packet_type *ptype, *pt_prev;
3201        rx_handler_func_t *rx_handler;
3202        struct net_device *orig_dev;
3203        struct net_device *null_or_dev;
3204        bool deliver_exact = false;
3205        int ret = NET_RX_DROP;
3206        __be16 type;
3207        unsigned long pflags = current->flags;
3208
3209        net_timestamp_check(!netdev_tstamp_prequeue, skb);
3210
3211        trace_netif_receive_skb(skb);
3212
3213        /*
3214         * PFMEMALLOC skbs are special, they should
3215         * - be delivered to SOCK_MEMALLOC sockets only
3216         * - stay away from userspace
3217         * - have bounded memory usage
3218         *
3219         * Use PF_MEMALLOC as this saves us from propagating the allocation
3220         * context down to all allocation sites.
3221         */
3222        if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3223                current->flags |= PF_MEMALLOC;
3224
3225        /* if we've gotten here through NAPI, check netpoll */
3226        if (netpoll_receive_skb(skb))
3227                goto out;
3228
3229        orig_dev = skb->dev;
3230
3231        skb_reset_network_header(skb);
3232        skb_reset_transport_header(skb);
3233        skb_reset_mac_len(skb);
3234
3235        pt_prev = NULL;
3236
3237        rcu_read_lock();
3238
3239another_round:
3240        skb->skb_iif = skb->dev->ifindex;
3241
3242        __this_cpu_inc(softnet_data.processed);
3243
3244        if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3245                skb = vlan_untag(skb);
3246                if (unlikely(!skb))
3247                        goto unlock;
3248        }
3249
3250#ifdef CONFIG_NET_CLS_ACT
3251        if (skb->tc_verd & TC_NCLS) {
3252                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3253                goto ncls;
3254        }
3255#endif
3256
3257        if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3258                goto skip_taps;
3259
3260        list_for_each_entry_rcu(ptype, &ptype_all, list) {
3261                if (!ptype->dev || ptype->dev == skb->dev) {
3262                        if (pt_prev)
3263                                ret = deliver_skb(skb, pt_prev, orig_dev);
3264                        pt_prev = ptype;
3265                }
3266        }
3267
3268skip_taps:
3269#ifdef CONFIG_NET_CLS_ACT
3270        skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3271        if (!skb)
3272                goto unlock;
3273ncls:
3274#endif
3275
3276        if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3277                                && !skb_pfmemalloc_protocol(skb))
3278                goto drop;
3279
3280        if (vlan_tx_tag_present(skb)) {
3281                if (pt_prev) {
3282                        ret = deliver_skb(skb, pt_prev, orig_dev);
3283                        pt_prev = NULL;
3284                }
3285                if (vlan_do_receive(&skb))
3286                        goto another_round;
3287                else if (unlikely(!skb))
3288                        goto unlock;
3289        }
3290
3291        rx_handler = rcu_dereference(skb->dev->rx_handler);
3292        if (rx_handler) {
3293                if (pt_prev) {
3294                        ret = deliver_skb(skb, pt_prev, orig_dev);
3295                        pt_prev = NULL;
3296                }
3297                switch (rx_handler(&skb)) {
3298                case RX_HANDLER_CONSUMED:
3299                        goto unlock;
3300                case RX_HANDLER_ANOTHER:
3301                        goto another_round;
3302                case RX_HANDLER_EXACT:
3303                        deliver_exact = true;
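                            /* fall through */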
3304                case RX_HANDLER_PASS:
3305                        break;
3306                default:
3307                        BUG();
3308                }
3309        }
3310
3311        if (vlan_tx_nonzero_tag_present(skb))
3312                skb->pkt_type = PACKET_OTHERHOST;
3313
3314        /* deliver only exact match when indicated */
3315        null_or_dev = deliver_exact ? skb->dev : NULL;
3316
3317        type = skb->protocol;
3318        list_for_each_entry_rcu(ptype,
3319                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3320                if (ptype->type == type &&
3321                    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3322                     ptype->dev == orig_dev)) {
3323                        if (pt_prev)
3324                                ret = deliver_skb(skb, pt_prev, orig_dev);
3325                        pt_prev = ptype;
3326                }
3327        }
3328
3329        if (pt_prev) {
3330                if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3331                        goto drop;
3332                else
3333                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3334        } else {
3335drop:
3336                atomic_long_inc(&skb->dev->rx_dropped);
3337                kfree_skb(skb);
3338                /* Jamal, now you will not be able to escape explaining
3339                 * to me how you were going to use this. :-)
3340                 */
3341                ret = NET_RX_DROP;
3342        }
3343
3344unlock:
3345        rcu_read_unlock();
3346out:
3347        tsk_restore_flags(current, pflags, PF_MEMALLOC);
3348        return ret;
3349}
3350
3351/**
3352 *      netif_receive_skb - process receive buffer from network
3353 *      @skb: buffer to process
3354 *
3355 *      netif_receive_skb() is the main receive data processing function.
3356 *      It always succeeds. The buffer may be dropped during processing
3357 *      for congestion control or by the protocol layers.
3358 *
3359 *      This function may only be called from softirq context and interrupts
3360 *      should be enabled.
3361 *
3362 *      Return values (usually ignored):
3363 *      NET_RX_SUCCESS: no congestion
3364 *      NET_RX_DROP: packet was dropped
3365 */
3366int netif_receive_skb(struct sk_buff *skb)
3367{
3368        net_timestamp_check(netdev_tstamp_prequeue, skb);
3369
3370        if (skb_defer_rx_timestamp(skb))
3371                return NET_RX_SUCCESS;
3372
3373#ifdef CONFIG_RPS
3374        if (static_key_false(&rps_needed)) {
3375                struct rps_dev_flow voidflow, *rflow = &voidflow;
3376                int cpu, ret;
3377
3378                rcu_read_lock();
3379
3380                cpu = get_rps_cpu(skb->dev, skb, &rflow);
3381
3382                if (cpu >= 0) {
3383                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3384                        rcu_read_unlock();
3385                        return ret;
3386                }
3387                rcu_read_unlock();
3388        }
3389#endif
3390        return __netif_receive_skb(skb);
3391}
3392EXPORT_SYMBOL(netif_receive_skb);
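
/*
 * Example (illustrative, not part of this file): a NAPI driver delivers each
 * completed skb from its ->poll() routine in softirq context:
 *
 *      skb->protocol = eth_type_trans(skb, dev);
 *      netif_receive_skb(skb);         // or napi_gro_receive(), see below
 */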
3393
3394/* Network device is going away, flush any packets still pending.
3395 * Called with irqs disabled.
3396 */
3397static void flush_backlog(void *arg)
3398{
3399        struct net_device *dev = arg;
3400        struct softnet_data *sd = &__get_cpu_var(softnet_data);
3401        struct sk_buff *skb, *tmp;
3402
3403        rps_lock(sd);
3404        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3405                if (skb->dev == dev) {
3406                        __skb_unlink(skb, &sd->input_pkt_queue);
3407                        kfree_skb(skb);
3408                        input_queue_head_incr(sd);
3409                }
3410        }
3411        rps_unlock(sd);
3412
3413        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3414                if (skb->dev == dev) {
3415                        __skb_unlink(skb, &sd->process_queue);
3416                        kfree_skb(skb);
3417                        input_queue_head_incr(sd);
3418                }
3419        }
3420}
3421
3422static int napi_gro_complete(struct sk_buff *skb)
3423{
3424        struct packet_type *ptype;
3425        __be16 type = skb->protocol;
3426        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3427        int err = -ENOENT;
3428
3429        if (NAPI_GRO_CB(skb)->count == 1) {
3430                skb_shinfo(skb)->gso_size = 0;
3431                goto out;
3432        }
3433
3434        rcu_read_lock();
3435        list_for_each_entry_rcu(ptype, head, list) {
3436                if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3437                        continue;
3438
3439                err = ptype->gro_complete(skb);
3440                break;
3441        }
3442        rcu_read_unlock();
3443
3444        if (err) {
3445                WARN_ON(&ptype->list == head);
3446                kfree_skb(skb);
3447                return NET_RX_SUCCESS;
3448        }
3449
3450out:
3451        return netif_receive_skb(skb);
3452}
3453
3454inline void napi_gro_flush(struct napi_struct *napi)
3455{
3456        struct sk_buff *skb, *next;
3457
3458        for (skb = napi->gro_list; skb; skb = next) {
3459                next = skb->next;
3460                skb->next = NULL;
3461                napi_gro_complete(skb);
3462        }
3463
3464        napi->gro_count = 0;
3465        napi->gro_list = NULL;
3466}
3467EXPORT_SYMBOL(napi_gro_flush);
3468
3469enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3470{
3471        struct sk_buff **pp = NULL;
3472        struct packet_type *ptype;
3473        __be16 type = skb->protocol;
3474        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3475        int same_flow;
3476        int mac_len;
3477        enum gro_result ret;
3478
3479        if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3480                goto normal;
3481
3482        if (skb_is_gso(skb) || skb_has_frag_list(skb))
3483                goto normal;
3484
3485        rcu_read_lock();
3486        list_for_each_entry_rcu(ptype, head, list) {
3487                if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3488                        continue;
3489
3490                skb_set_network_header(skb, skb_gro_offset(skb));
3491                mac_len = skb->network_header - skb->mac_header;
3492                skb->mac_len = mac_len;
3493                NAPI_GRO_CB(skb)->same_flow = 0;
3494                NAPI_GRO_CB(skb)->flush = 0;
3495                NAPI_GRO_CB(skb)->free = 0;
3496
3497                pp = ptype->gro_receive(&napi->gro_list, skb);
3498                break;
3499        }
3500        rcu_read_unlock();
3501
3502        if (&ptype->list == head)
3503                goto normal;
3504
3505        same_flow = NAPI_GRO_CB(skb)->same_flow;
3506        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3507
3508        if (pp) {
3509                struct sk_buff *nskb = *pp;
3510
3511                *pp = nskb->next;
3512                nskb->next = NULL;
3513                napi_gro_complete(nskb);
3514                napi->gro_count--;
3515        }
3516
3517        if (same_flow)
3518                goto ok;
3519
3520        if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3521                goto normal;
3522
3523        napi->gro_count++;
3524        NAPI_GRO_CB(skb)->count = 1;
3525        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3526        skb->next = napi->gro_list;
3527        napi->gro_list = skb;
3528        ret = GRO_HELD;
3529
3530pull:
3531        if (skb_headlen(skb) < skb_gro_offset(skb)) {
3532                int grow = skb_gro_offset(skb) - skb_headlen(skb);
3533
3534                BUG_ON(skb->end - skb->tail < grow);
3535
3536                memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3537
3538                skb->tail += grow;
3539                skb->data_len -= grow;
3540
3541                skb_shinfo(skb)->frags[0].page_offset += grow;
3542                skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3543
3544                if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3545                        skb_frag_unref(skb, 0);
3546                        memmove(skb_shinfo(skb)->frags,
3547                                skb_shinfo(skb)->frags + 1,
3548                                --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3549                }
3550        }
3551
3552ok:
3553        return ret;
3554
3555normal:
3556        ret = GRO_NORMAL;
3557        goto pull;
3558}
3559EXPORT_SYMBOL(dev_gro_receive);
3560
3561static inline gro_result_t
3562__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3563{
3564        struct sk_buff *p;
3565        unsigned int maclen = skb->dev->hard_header_len;
3566
3567        for (p = napi->gro_list; p; p = p->next) {
3568                unsigned long diffs;
3569
3570                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3571                diffs |= p->vlan_tci ^ skb->vlan_tci;
3572                if (maclen == ETH_HLEN)
3573                        diffs |= compare_ether_header(skb_mac_header(p),
3574                                                      skb_gro_mac_header(skb));
3575                else if (!diffs)
3576                        diffs = memcmp(skb_mac_header(p),
3577                                       skb_gro_mac_header(skb),
3578                                       maclen);
3579                NAPI_GRO_CB(p)->same_flow = !diffs;
3580                NAPI_GRO_CB(p)->flush = 0;
3581        }
3582
3583        return dev_gro_receive(napi, skb);
3584}
3585
3586gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3587{
3588        switch (ret) {
3589        case GRO_NORMAL:
3590                if (netif_receive_skb(skb))
3591                        ret = GRO_DROP;
3592                break;
3593
3594        case GRO_DROP:
3595                kfree_skb(skb);
3596                break;
3597
3598        case GRO_MERGED_FREE:
3599                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3600                        kmem_cache_free(skbuff_head_cache, skb);
3601                else
3602                        __kfree_skb(skb);
3603                break;
3604
3605        case GRO_HELD:
3606        case GRO_MERGED:
3607                break;
3608        }
3609
3610        return ret;
3611}
3612EXPORT_SYMBOL(napi_skb_finish);
3613
3614void skb_gro_reset_offset(struct sk_buff *skb)
3615{
3616        NAPI_GRO_CB(skb)->data_offset = 0;
3617        NAPI_GRO_CB(skb)->frag0 = NULL;
3618        NAPI_GRO_CB(skb)->frag0_len = 0;
3619
3620        if (skb->mac_header == skb->tail &&
3621            !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3622                NAPI_GRO_CB(skb)->frag0 =
3623                        skb_frag_address(&skb_shinfo(skb)->frags[0]);
3624                NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3625        }
3626}
3627EXPORT_SYMBOL(skb_gro_reset_offset);
3628
3629gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3630{
3631        skb_gro_reset_offset(skb);
3632
3633        return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3634}
3635EXPORT_SYMBOL(napi_gro_receive);
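
/*
 * Example (illustrative sketch, not part of this file): a GRO-capable driver
 * funnels received skbs through napi_gro_receive() from its poll routine so
 * that same-flow segments can be merged before hitting the stack. The
 * mydrv_* names are hypothetical.
 *
 *      static int mydrv_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
 *              int work = 0;
 *
 *              while (work < budget && mydrv_rx_ready(ring)) {
 *                      struct sk_buff *skb = mydrv_build_skb(ring);
 *
 *                      skb->protocol = eth_type_trans(skb, ring->netdev);
 *                      napi_gro_receive(napi, skb);
 *                      work++;
 *              }
 *              if (work < budget)
 *                      napi_complete(napi);
 *              return work;
 *      }
 */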
3636
3637static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3638{
3639        __skb_pull(skb, skb_headlen(skb));
3640        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3641        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3642        skb->vlan_tci = 0;
3643        skb->dev = napi->dev;
3644        skb->skb_iif = 0;
3645
3646        napi->skb = skb;
3647}
3648
3649struct sk_buff *napi_get_frags(struct napi_struct *napi)
3650{
3651        struct sk_buff *skb = napi->skb;
3652
3653        if (!skb) {
3654                skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3655                if (skb)
3656                        napi->skb = skb;
3657        }
3658        return skb;
3659}
3660EXPORT_SYMBOL(napi_get_frags);
3661
3662gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3663                               gro_result_t ret)
3664{
3665        switch (ret) {
3666        case GRO_NORMAL:
3667        case GRO_HELD:
3668                skb->protocol = eth_type_trans(skb, skb->dev);
3669
3670                if (ret == GRO_HELD)
3671                        skb_gro_pull(skb, -ETH_HLEN);
3672                else if (netif_receive_skb(skb))
3673                        ret = GRO_DROP;
3674                break;
3675
3676        case GRO_DROP:
3677        case GRO_MERGED_FREE:
3678                napi_reuse_skb(napi, skb);
3679                break;
3680
3681        case GRO_MERGED:
3682                break;
3683        }
3684
3685        return ret;
3686}
3687EXPORT_SYMBOL(napi_frags_finish);
3688
3689static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3690{
3691        struct sk_buff *skb = napi->skb;
3692        struct ethhdr *eth;
3693        unsigned int hlen;
3694        unsigned int off;
3695
3696        napi->skb = NULL;
3697
3698        skb_reset_mac_header(skb);
3699        skb_gro_reset_offset(skb);
3700
3701        off = skb_gro_offset(skb);
3702        hlen = off + sizeof(*eth);
3703        eth = skb_gro_header_fast(skb, off);
3704        if (skb_gro_header_hard(skb, hlen)) {
3705                eth = skb_gro_header_slow(skb, hlen, off);
3706                if (unlikely(!eth)) {
3707                        napi_reuse_skb(napi, skb);
3708                        skb = NULL;
3709                        goto out;
3710                }
3711        }
3712
3713        skb_gro_pull(skb, sizeof(*eth));
3714
3715        /*
3716         * This works because the only protocols we care about don't require
3717         * special handling.  We'll fix it up properly at the end.
3718         */
3719        skb->protocol = eth->h_proto;
3720
3721out:
3722        return skb;
3723}
3724
3725gro_result_t napi_gro_frags(struct napi_struct *napi)
3726{
3727        struct sk_buff *skb = napi_frags_skb(napi);
3728
3729        if (!skb)
3730                return GRO_DROP;
3731
3732        return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3733}
3734EXPORT_SYMBOL(napi_gro_frags);
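
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * receives directly into pages can skip the per-packet linear skb by pairing
 * napi_get_frags() with napi_gro_frags(); the ethernet header is parsed by
 * napi_frags_skb() above. Error handling is omitted and page/offset/len are
 * assumed to describe the received frame.
 *
 *      struct sk_buff *skb = napi_get_frags(napi);
 *
 *      if (!skb)
 *              return;                         // out of memory, drop
 *      skb_fill_page_desc(skb, 0, page, offset, len);
 *      skb->len += len;
 *      skb->data_len += len;
 *      skb->truesize += PAGE_SIZE;
 *      napi_gro_frags(napi);
 */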
3735
3736/*
3737 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
3738 * Note: called with local irq disabled, but exits with local irq enabled.
3739 */
3740static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3741{
3742#ifdef CONFIG_RPS
3743        struct softnet_data *remsd = sd->rps_ipi_list;
3744
3745        if (remsd) {
3746                sd->rps_ipi_list = NULL;
3747
3748                local_irq_enable();
3749
3750                /* Send pending IPIs to kick RPS processing on remote cpus. */
3751                while (remsd) {
3752                        struct softnet_data *next = remsd->rps_ipi_next;
3753
3754                        if (cpu_online(remsd->cpu))
3755                                __smp_call_function_single(remsd->cpu,
3756                                                           &remsd->csd, 0);
3757                        remsd = next;
3758                }
3759        } else
3760#endif
3761                local_irq_enable();
3762}
3763
3764static int process_backlog(struct napi_struct *napi, int quota)
3765{
3766        int work = 0;
3767        struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3768
3769#ifdef CONFIG_RPS
3770        /* Check if we have pending IPIs; it's better to send them now
3771         * than to wait for net_rx_action() to end.
3772         */
3773        if (sd->rps_ipi_list) {
3774                local_irq_disable();
3775                net_rps_action_and_irq_enable(sd);
3776        }
3777#endif
3778        napi->weight = weight_p;
3779        local_irq_disable();
3780        while (work < quota) {
3781                struct sk_buff *skb;
3782                unsigned int qlen;
3783
3784                while ((skb = __skb_dequeue(&sd->process_queue))) {
3785                        local_irq_enable();
3786                        __netif_receive_skb(skb);
3787                        local_irq_disable();
3788                        input_queue_head_incr(sd);
3789                        if (++work >= quota) {
3790                                local_irq_enable();
3791                                return work;
3792                        }
3793                }
3794
3795                rps_lock(sd);
3796                qlen = skb_queue_len(&sd->input_pkt_queue);
3797                if (qlen)
3798                        skb_queue_splice_tail_init(&sd->input_pkt_queue,
3799                                                   &sd->process_queue);
3800
3801                if (qlen < quota - work) {
3802                        /*
3803                         * Inline a custom version of __napi_complete().
3804                         * Only the current cpu owns and manipulates this napi,
3805                         * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3806                         * so we can use a plain write instead of clear_bit()
3807                         * and we don't need an smp_mb() memory barrier.
3808                         */
3809                        list_del(&napi->poll_list);
3810                        napi->state = 0;
3811
3812                        quota = work + qlen;
3813                }
3814                rps_unlock(sd);
3815        }
3816        local_irq_enable();
3817
3818        return work;
3819}
3820
3821/**
3822 * __napi_schedule - schedule for receive
3823 * @n: entry to schedule
3824 *
3825 * The entry's receive function will be scheduled to run
3826 */
3827void __napi_schedule(struct napi_struct *n)
3828{
3829        unsigned long flags;
3830
3831        local_irq_save(flags);
3832        ____napi_schedule(&__get_cpu_var(softnet_data), n);
3833        local_irq_restore(flags);
3834}
3835EXPORT_SYMBOL(__napi_schedule);
3836
3837void __napi_complete(struct napi_struct *n)
3838{
3839        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3840        BUG_ON(n->gro_list);
3841
3842        list_del(&n->poll_list);
3843        smp_mb__before_clear_bit();
3844        clear_bit(NAPI_STATE_SCHED, &n->state);
3845}
3846EXPORT_SYMBOL(__napi_complete);
3847
3848void napi_complete(struct napi_struct *n)
3849{
3850        unsigned long flags;
3851
3852        /*
3853         * don't let napi dequeue from the cpu poll list
3854         * just in case it's running on a different cpu
3855         */
3856        if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3857                return;
3858
3859        napi_gro_flush(n);
3860        local_irq_save(flags);
3861        __napi_complete(n);
3862        local_irq_restore(flags);
3863}
3864EXPORT_SYMBOL(napi_complete);
3865
3866void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3867                    int (*poll)(struct napi_struct *, int), int weight)
3868{
3869        INIT_LIST_HEAD(&napi->poll_list);
3870        napi->gro_count = 0;
3871        napi->gro_list = NULL;
3872        napi->skb = NULL;
3873        napi->poll = poll;
3874        napi->weight = weight;
3875        list_add(&napi->dev_list, &dev->napi_list);
3876        napi->dev = dev;
3877#ifdef CONFIG_NETPOLL
3878        spin_lock_init(&napi->poll_lock);
3879        napi->poll_owner = -1;
3880#endif
3881        set_bit(NAPI_STATE_SCHED, &napi->state);
3882}
3883EXPORT_SYMBOL(netif_napi_add);
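
/*
 * Example (illustrative sketch, not part of this file): the usual NAPI life
 * cycle in a driver. The mydrv_* names are hypothetical; 64 is the weight
 * most drivers pass.
 *
 *      // probe
 *      netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
 *
 *      // open
 *      napi_enable(&priv->napi);
 *
 *      // interrupt handler: mask RX interrupts, then schedule the poll
 *      static irqreturn_t mydrv_isr(int irq, void *data)
 *      {
 *              struct mydrv_priv *priv = data;
 *
 *              if (napi_schedule_prep(&priv->napi)) {
 *                      mydrv_mask_rx_irq(priv);
 *                      __napi_schedule(&priv->napi);
 *              }
 *              return IRQ_HANDLED;
 *      }
 *
 *      // close / remove
 *      napi_disable(&priv->napi);
 *      netif_napi_del(&priv->napi);
 */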
3884
3885void netif_napi_del(struct napi_struct *napi)
3886{
3887        struct sk_buff *skb, *next;
3888
3889        list_del_init(&napi->dev_list);
3890        napi_free_frags(napi);
3891
3892        for (skb = napi->gro_list; skb; skb = next) {
3893                next = skb->next;
3894                skb->next = NULL;
3895                kfree_skb(skb);
3896        }
3897
3898        napi->gro_list = NULL;
3899        napi->gro_count = 0;
3900}
3901EXPORT_SYMBOL(netif_napi_del);
3902
3903static void net_rx_action(struct softirq_action *h)
3904{
3905        struct softnet_data *sd = &__get_cpu_var(softnet_data);
3906        unsigned long time_limit = jiffies + 2;
3907        int budget = netdev_budget;
3908        void *have;
3909
3910        local_irq_disable();
3911
3912        while (!list_empty(&sd->poll_list)) {
3913                struct napi_struct *n;
3914                int work, weight;
3915
3916                /* If the softirq window is exhausted then punt.
3917                 * Allow this to run for 2 jiffies, which allows
3918                 * an average latency of 1.5/HZ.
3919                 */
3920                if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3921                        goto softnet_break;
3922
3923                local_irq_enable();
3924
3925                /* Even though interrupts have been re-enabled, this
3926                 * access is safe because interrupts can only add new
3927                 * entries to the tail of this list, and only ->poll()
3928                 * calls can remove this head entry from the list.
3929                 */
3930                n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3931
3932                have = netpoll_poll_lock(n);
3933
3934                weight = n->weight;
3935
3936                /* This NAPI_STATE_SCHED test is for avoiding a race
3937                 * with netpoll's poll_napi().  Only the entity which
3938                 * obtains the lock and sees NAPI_STATE_SCHED set will
3939                 * actually make the ->poll() call.  Therefore we avoid
3940                 * accidentally calling ->poll() when NAPI is not scheduled.
3941                 */
3942                work = 0;
3943                if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3944                        work = n->poll(n, weight);
3945                        trace_napi_poll(n);
3946                }
3947
3948                WARN_ON_ONCE(work > weight);
3949
3950                budget -= work;
3951
3952                local_irq_disable();
3953
3954                /* Drivers must not modify the NAPI state if they
3955                 * consume the entire weight.  In such cases this code
3956                 * still "owns" the NAPI instance and therefore can
3957                 * move the instance around on the list at-will.
3958                 */
3959                if (unlikely(work == weight)) {
3960                        if (unlikely(napi_disable_pending(n))) {
3961                                local_irq_enable();
3962                                napi_complete(n);
3963                                local_irq_disable();
3964                        } else
3965                                list_move_tail(&n->poll_list, &sd->poll_list);
3966                }
3967
3968                netpoll_poll_unlock(have);
3969        }
3970out:
3971        net_rps_action_and_irq_enable(sd);
3972
3973#ifdef CONFIG_NET_DMA
3974        /*
3975         * There may not be any more sk_buffs coming right now, so push
3976         * any pending DMA copies to hardware
3977         */
3978        dma_issue_pending_all();
3979#endif
3980
3981        return;
3982
3983softnet_break:
3984        sd->time_squeeze++;
3985        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3986        goto out;
3987}
3988
3989static gifconf_func_t *gifconf_list[NPROTO];
3990
3991/**
3992 *      register_gifconf        -       register a SIOCGIF handler
3993 *      @family: Address family
3994 *      @gifconf: Function handler
3995 *
3996 *      Register protocol dependent address dumping routines. The handler
3997 *      that is passed must not be freed or reused until it has been replaced
3998 *      by another handler.
3999 */
4000int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4001{
4002        if (family >= NPROTO)
4003                return -EINVAL;
4004        gifconf_list[family] = gifconf;
4005        return 0;
4006}
4007EXPORT_SYMBOL(register_gifconf);
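
/*
 * Example (illustrative): an address family hooks itself into SIOCGIFCONF at
 * init time; IPv4, for instance, does roughly:
 *
 *      register_gifconf(PF_INET, inet_gifconf);
 */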
4008
4009
4010/*
4011 *      Map an interface index to its name (SIOCGIFNAME)
4012 */
4013
4014/*
4015 *      We need this ioctl for efficient implementation of the
4016 *      if_indextoname() function required by the IPv6 API.  Without
4017 *      it, we would have to search all the interfaces to find a
4018 *      match.  --pb
4019 */
4020
4021static int dev_ifname(struct net *net, struct ifreq __user *arg)
4022{
4023        struct net_device *dev;
4024        struct ifreq ifr;
4025
4026        /*
4027         *      Fetch the caller's info block.
4028         */
4029
4030        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4031                return -EFAULT;
4032
4033        rcu_read_lock();
4034        dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4035        if (!dev) {
4036                rcu_read_unlock();
4037                return -ENODEV;
4038        }
4039
4040        strcpy(ifr.ifr_name, dev->name);
4041        rcu_read_unlock();
4042
4043        if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4044                return -EFAULT;
4045        return 0;
4046}
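
/*
 * Example (illustrative, userspace side): SIOCGIFNAME is what libc's
 * if_indextoname() ends up issuing. A minimal sketch, error handling omitted:
 *
 *      struct ifreq ifr = { .ifr_ifindex = idx };
 *      int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
 *              printf("index %d is %s\n", idx, ifr.ifr_name);
 */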
4047
4048/*
4049 *      Perform a SIOCGIFCONF call. This structure will change
4050 *      size eventually, and there is nothing I can do about it.
4051 *      Thus we will need a 'compatibility mode'.
4052 */
4053
4054static int dev_ifconf(struct net *net, char __user *arg)
4055{
4056        struct ifconf ifc;
4057        struct net_device *dev;
4058        char __user *pos;
4059        int len;
4060        int total;
4061        int i;
4062
4063        /*
4064         *      Fetch the caller's info block.
4065         */
4066
4067        if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4068                return -EFAULT;
4069
4070        pos = ifc.ifc_buf;
4071        len = ifc.ifc_len;
4072
4073        /*
4074         *      Loop over the interfaces, and write an info block for each.
4075         */
4076
4077        total = 0;
4078        for_each_netdev(net, dev) {
4079                for (i = 0; i < NPROTO; i++) {
4080                        if (gifconf_list[i]) {
4081                                int done;
4082                                if (!pos)
4083                                        done = gifconf_list[i](dev, NULL, 0);
4084                                else
4085                                        done = gifconf_list[i](dev, pos + total,
4086                                                               len - total);
4087                                if (done < 0)
4088                                        return -EFAULT;
4089                                total += done;
4090                        }
4091                }
4092        }
4093
4094        /*
4095         *      All done.  Write the updated control block back to the caller.
4096         */
4097        ifc.ifc_len = total;
4098
4099        /*
4100         *      Both BSD and Solaris return 0 here, so we do too.
4101         */
4102        return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4103}
4104
4105#ifdef CONFIG_PROC_FS
4106
4107#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4108
4109#define get_bucket(x) ((x) >> BUCKET_SPACE)
4110#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4111#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4112
4113static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4114{
4115        struct net *net = seq_file_net(seq);
4116        struct net_device *dev;
4117        struct hlist_node *p;
4118        struct hlist_head *h;
4119        unsigned int count = 0, offset = get_offset(*pos);
4120
4121        h = &net->dev_name_head[get_bucket(*pos)];
4122        hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4123                if (++count == offset)
4124                        return dev;
4125        }
4126
4127        return NULL;
4128}
4129
4130static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4131{
4132        struct net_device *dev;
4133        unsigned int bucket;
4134
4135        do {
4136                dev = dev_from_same_bucket(seq, pos);
4137                if (dev)
4138                        return dev;
4139
4140                bucket = get_bucket(*pos) + 1;
4141                *pos = set_bucket_offset(bucket, 1);
4142        } while (bucket < NETDEV_HASHENTRIES);
4143
4144        return NULL;
4145}
4146
4147/*
4148 *      This is invoked by the /proc filesystem handler to display a device
4149 *      in detail.
4150 */
4151void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4152        __acquires(RCU)
4153{
4154        rcu_read_lock();
4155        if (!*pos)
4156                return SEQ_START_TOKEN;
4157
4158        if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4159                return NULL;
4160
4161        return dev_from_bucket(seq, pos);
4162}
4163
4164void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4165{
4166        ++*pos;
4167        return dev_from_bucket(seq, pos);
4168}
4169
4170void dev_seq_stop(struct seq_file *seq, void *v)
4171        __releases(RCU)
4172{
4173        rcu_read_unlock();
4174}
4175
4176static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4177{
4178        struct rtnl_link_stats64 temp;
4179        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4180
4181        seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4182                   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4183                   dev->name, stats->rx_bytes, stats->rx_packets,
4184                   stats->rx_errors,
4185                   stats->rx_dropped + stats->rx_missed_errors,
4186                   stats->rx_fifo_errors,
4187                   stats->rx_length_errors + stats->rx_over_errors +
4188                    stats->rx_crc_errors + stats->rx_frame_errors,
4189                   stats->rx_compressed, stats->multicast,
4190                   stats->tx_bytes, stats->tx_packets,
4191                   stats->tx_errors, stats->tx_dropped,
4192                   stats->tx_fifo_errors, stats->collisions,
4193                   stats->tx_carrier_errors +
4194                    stats->tx_aborted_errors +
4195                    stats->tx_window_errors +
4196                    stats->tx_heartbeat_errors,
4197                   stats->tx_compressed);
4198}
4199
4200/*
4201 *      Called from the PROCfs module. This now uses the new arbitrary sized
4202 *      /proc/net interface to create /proc/net/dev
4203 */
4204static int dev_seq_show(struct seq_file *seq, void *v)
4205{
4206        if (v == SEQ_START_TOKEN)
4207                seq_puts(seq, "Inter-|   Receive                            "
4208                              "                    |  Transmit\n"
4209                              " face |bytes    packets errs drop fifo frame "
4210                              "compressed multicast|bytes    packets errs "
4211                              "drop fifo colls carrier compressed\n");
4212        else
4213                dev_seq_printf_stats(seq, v);
4214        return 0;
4215}
4216
4217static struct softnet_data *softnet_get_online(loff_t *pos)
4218{
4219        struct softnet_data *sd = NULL;
4220
4221        while (*pos < nr_cpu_ids)
4222                if (cpu_online(*pos)) {
4223                        sd = &per_cpu(softnet_data, *pos);
4224                        break;
4225                } else
4226                        ++*pos;
4227        return sd;
4228}
4229
4230static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4231{
4232        return softnet_get_online(pos);
4233}
4234
4235static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4236{
4237        ++*pos;
4238        return softnet_get_online(pos);
4239}
4240
4241static void softnet_seq_stop(struct seq_file *seq, void *v)
4242{
4243}
4244
4245static int softnet_seq_show(struct seq_file *seq, void *v)
4246{
4247        struct softnet_data *sd = v;
4248
4249        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4250                   sd->processed, sd->dropped, sd->time_squeeze, 0,
4251                   0, 0, 0, 0, /* was fastroute */
4252                   sd->cpu_collision, sd->received_rps);
4253        return 0;
4254}
4255
4256static const struct seq_operations dev_seq_ops = {
4257        .start = dev_seq_start,
4258        .next  = dev_seq_next,
4259        .stop  = dev_seq_stop,
4260        .show  = dev_seq_show,
4261};
4262
4263static int dev_seq_open(struct inode *inode, struct file *file)
4264{
4265        return seq_open_net(inode, file, &dev_seq_ops,
4266                            sizeof(struct seq_net_private));
4267}
4268
4269static const struct file_operations dev_seq_fops = {
4270        .owner   = THIS_MODULE,
4271        .open    = dev_seq_open,
4272        .read    = seq_read,
4273        .llseek  = seq_lseek,
4274        .release = seq_release_net,
4275};
4276
4277static const struct seq_operations softnet_seq_ops = {
4278        .start = softnet_seq_start,
4279        .next  = softnet_seq_next,
4280        .stop  = softnet_seq_stop,
4281        .show  = softnet_seq_show,
4282};
4283
4284static int softnet_seq_open(struct inode *inode, struct file *file)
4285{
4286        return seq_open(file, &softnet_seq_ops);
4287}
4288
4289static const struct file_operations softnet_seq_fops = {
4290        .owner   = THIS_MODULE,
4291        .open    = softnet_seq_open,
4292        .read    = seq_read,
4293        .llseek  = seq_lseek,
4294        .release = seq_release,
4295};
4296
4297static void *ptype_get_idx(loff_t pos)
4298{
4299        struct packet_type *pt = NULL;
4300        loff_t i = 0;
4301        int t;
4302
4303        list_for_each_entry_rcu(pt, &ptype_all, list) {
4304                if (i == pos)
4305                        return pt;
4306                ++i;
4307        }
4308
4309        for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4310                list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4311                        if (i == pos)
4312                                return pt;
4313                        ++i;
4314                }
4315        }
4316        return NULL;
4317}
4318
4319static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4320        __acquires(RCU)
4321{
4322        rcu_read_lock();
4323        return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4324}
4325
4326static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4327{
4328        struct packet_type *pt;
4329        struct list_head *nxt;
4330        int hash;
4331
4332        ++*pos;
4333        if (v == SEQ_START_TOKEN)
4334                return ptype_get_idx(0);
4335
4336        pt = v;
4337        nxt = pt->list.next;
4338        if (pt->type == htons(ETH_P_ALL)) {
4339                if (nxt != &ptype_all)
4340                        goto found;
4341                hash = 0;
4342                nxt = ptype_base[0].next;
4343        } else
4344                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4345
4346        while (nxt == &ptype_base[hash]) {
4347                if (++hash >= PTYPE_HASH_SIZE)
4348                        return NULL;
4349                nxt = ptype_base[hash].next;
4350        }
4351found:
4352        return list_entry(nxt, struct packet_type, list);
4353}
4354
4355static void ptype_seq_stop(struct seq_file *seq, void *v)
4356        __releases(RCU)
4357{
4358        rcu_read_unlock();
4359}
4360
4361static int ptype_seq_show(struct seq_file *seq, void *v)
4362{
4363        struct packet_type *pt = v;
4364
4365        if (v == SEQ_START_TOKEN)
4366                seq_puts(seq, "Type Device      Function\n");
4367        else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4368                if (pt->type == htons(ETH_P_ALL))
4369                        seq_puts(seq, "ALL ");
4370                else
4371                        seq_printf(seq, "%04x", ntohs(pt->type));
4372
4373                seq_printf(seq, " %-8s %pF\n",
4374                           pt->dev ? pt->dev->name : "", pt->func);
4375        }
4376
4377        return 0;
4378}
4379
4380static const struct seq_operations ptype_seq_ops = {
4381        .start = ptype_seq_start,
4382        .next  = ptype_seq_next,
4383        .stop  = ptype_seq_stop,
4384        .show  = ptype_seq_show,
4385};
4386
4387static int ptype_seq_open(struct inode *inode, struct file *file)
4388{
4389        return seq_open_net(inode, file, &ptype_seq_ops,
4390                        sizeof(struct seq_net_private));
4391}
4392
4393static const struct file_operations ptype_seq_fops = {
4394        .owner   = THIS_MODULE,
4395        .open    = ptype_seq_open,
4396        .read    = seq_read,
4397        .llseek  = seq_lseek,
4398        .release = seq_release_net,
4399};
4400
4401
4402static int __net_init dev_proc_net_init(struct net *net)
4403{
4404        int rc = -ENOMEM;
4405
4406        if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4407                goto out;
4408        if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4409                goto out_dev;
4410        if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4411                goto out_softnet;
4412
4413        if (wext_proc_init(net))
4414                goto out_ptype;
4415        rc = 0;
4416out:
4417        return rc;
4418out_ptype:
4419        proc_net_remove(net, "ptype");
4420out_softnet:
4421        proc_net_remove(net, "softnet_stat");
4422out_dev:
4423        proc_net_remove(net, "dev");
4424        goto out;
4425}
4426
4427static void __net_exit dev_proc_net_exit(struct net *net)
4428{
4429        wext_proc_exit(net);
4430
4431        proc_net_remove(net, "ptype");
4432        proc_net_remove(net, "softnet_stat");
4433        proc_net_remove(net, "dev");
4434}
4435
4436static struct pernet_operations __net_initdata dev_proc_ops = {
4437        .init = dev_proc_net_init,
4438        .exit = dev_proc_net_exit,
4439};
4440
4441static int __init dev_proc_init(void)
4442{
4443        return register_pernet_subsys(&dev_proc_ops);
4444}
4445#else
4446#define dev_proc_init() 0
4447#endif  /* CONFIG_PROC_FS */
4448
4449
4450/**
4451 *      netdev_set_master       -       set up master pointer
4452 *      @slave: slave device
4453 *      @master: new master device
4454 *
4455 *      Changes the master device of the slave. Pass %NULL to break the
4456 *      bonding. The caller must hold the RTNL semaphore. On a failure
4457 *      a negative errno code is returned. On success the reference counts
4458 *      are adjusted and the function returns zero.
4459 */
4460int netdev_set_master(struct net_device *slave, struct net_device *master)
4461{
4462        struct net_device *old = slave->master;
4463
4464        ASSERT_RTNL();
4465
4466        if (master) {
4467                if (old)
4468                        return -EBUSY;
4469                dev_hold(master);
4470        }
4471
4472        slave->master = master;
4473
4474        if (old)
4475                dev_put(old);
4476        return 0;
4477}
4478EXPORT_SYMBOL(netdev_set_master);
4479
4480/**
4481 *      netdev_set_bond_master  -       set up bonding master/slave pair
4482 *      @slave: slave device
4483 *      @master: new master device
4484 *
4485 *      Changes the master device of the slave. Pass %NULL to break the
4486 *      bonding. The caller must hold the RTNL semaphore. On a failure
4487 *      a negative errno code is returned. On success %RTM_NEWLINK is sent
4488 *      to the routing socket and the function returns zero.
4489 */
4490int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4491{
4492        int err;
4493
4494        ASSERT_RTNL();
4495
4496        err = netdev_set_master(slave, master);
4497        if (err)
4498                return err;
4499        if (master)
4500                slave->flags |= IFF_SLAVE;
4501        else
4502                slave->flags &= ~IFF_SLAVE;
4503
4504        rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4505        return 0;
4506}
4507EXPORT_SYMBOL(netdev_set_bond_master);
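
/*
 * Example (illustrative, not part of this file): a bonding-style driver pairs
 * a slave with its master roughly like this; on success the slave has
 * IFF_SLAVE set and an RTM_NEWLINK message has been sent.
 *
 *      ASSERT_RTNL();
 *      err = netdev_set_bond_master(slave_dev, bond_dev);
 *      if (err)
 *              return err;
 */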
4508
4509static void dev_change_rx_flags(struct net_device *dev, int flags)
4510{
4511        const struct net_device_ops *ops = dev->netdev_ops;
4512
4513        if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4514                ops->ndo_change_rx_flags(dev, flags);
4515}
4516
4517static int __dev_set_promiscuity(struct net_device *dev, int inc)
4518{
4519        unsigned int old_flags = dev->flags;
4520        uid_t uid;
4521        gid_t gid;
4522
4523        ASSERT_RTNL();
4524
4525        dev->flags |= IFF_PROMISC;
4526        dev->promiscuity += inc;
4527        if (dev->promiscuity == 0) {
4528                /*
4529                 * Avoid overflow.
4530                 * If inc causes overflow, leave promisc untouched and return an error.
4531                 */
4532                if (inc < 0)
4533                        dev->flags &= ~IFF_PROMISC;
4534                else {
4535                        dev->promiscuity -= inc;
4536                        pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4537                                dev->name);
4538                        return -EOVERFLOW;
4539                }
4540        }
4541        if (dev->flags != old_flags) {
4542                pr_info("device %s %s promiscuous mode\n",
4543                        dev->name,
4544                        dev->flags & IFF_PROMISC ? "entered" : "left");
4545                if (audit_enabled) {
4546                        current_uid_gid(&uid, &gid);
4547                        audit_log(current->audit_context, GFP_ATOMIC,
4548                                AUDIT_ANOM_PROMISCUOUS,
4549                                "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4550                                dev->name, (dev->flags & IFF_PROMISC),
4551                                (old_flags & IFF_PROMISC),
4552                                audit_get_loginuid(current),
4553                                uid, gid,
4554                                audit_get_sessionid(current));
4555                }
4556
4557                dev_change_rx_flags(dev, IFF_PROMISC);
4558        }
4559        return 0;
4560}
4561
4562/**
4563 *      dev_set_promiscuity     - update promiscuity count on a device
4564 *      @dev: device
4565 *      @inc: modifier
4566 *
4567 *      Add or remove promiscuity from a device. While the count in the device
4568 *      remains above zero the interface remains promiscuous. Once it hits zero
4569 *      the device reverts back to normal filtering operation. A negative inc
4570 *      value is used to drop promiscuity on the device.
4571 *      Return 0 if successful or a negative errno code on error.
4572 */
4573int dev_set_promiscuity(struct net_device *dev, int inc)
4574{
4575        unsigned int old_flags = dev->flags;
4576        int err;
4577
4578        err = __dev_set_promiscuity(dev, inc);
4579        if (err < 0)
4580                return err;
4581        if (dev->flags != old_flags)
4582                dev_set_rx_mode(dev);
4583        return err;
4584}
4585EXPORT_SYMBOL(dev_set_promiscuity);
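
/*
 * Example (illustrative, not part of this file): a packet-capture style user
 * takes a promiscuity reference while it is interested in all traffic and
 * drops it again when done; the device only leaves promiscuous mode once the
 * count is back to zero.
 *
 *      rtnl_lock();
 *      err = dev_set_promiscuity(dev, 1);      // take a reference
 *      rtnl_unlock();
 *
 *      // ... later ...
 *
 *      rtnl_lock();
 *      dev_set_promiscuity(dev, -1);           // drop our reference
 *      rtnl_unlock();
 */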
4586
4587/**
4588 *      dev_set_allmulti        - update allmulti count on a device
4589 *      @dev: device
4590 *      @inc: modifier
4591 *
4592 *      Add or remove reception of all multicast frames to a device. While the
4593 *      count in the device remains above zero the interface keeps receiving
4594 *      all multicast frames. Once it hits zero the device reverts back to normal
4595 *      filtering operation. A negative @inc value is used to drop the counter
4596 *      when releasing a resource needing all multicasts.
4597 *      Return 0 if successful or a negative errno code on error.
4598 */
4599
4600int dev_set_allmulti(struct net_device *dev, int inc)
4601{
4602        unsigned int old_flags = dev->flags;
4603
4604        ASSERT_RTNL();
4605
4606        dev->flags |= IFF_ALLMULTI;
4607        dev->allmulti += inc;
4608        if (dev->allmulti == 0) {
4609                /*
4610                 * Avoid overflow.
4611                 * If inc causes overflow, leave allmulti untouched and return an error.
4612                 */
4613                if (inc < 0)
4614                        dev->flags &= ~IFF_ALLMULTI;
4615                else {
4616                        dev->allmulti -= inc;
4617                        pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4618                                dev->name);
4619                        return -EOVERFLOW;
4620                }
4621        }
4622        if (dev->flags ^ old_flags) {
4623                dev_change_rx_flags(dev, IFF_ALLMULTI);
4624                dev_set_rx_mode(dev);
4625        }
4626        return 0;
4627}
4628EXPORT_SYMBOL(dev_set_allmulti);
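
/*
 * Example (illustrative): usage mirrors dev_set_promiscuity() above, e.g.
 * while a multicast routing daemon needs to see every multicast frame:
 *
 *      ASSERT_RTNL();
 *      dev_set_allmulti(dev, 1);       // start receiving all multicast frames
 *      ...
 *      dev_set_allmulti(dev, -1);      // last user gone, revert to filtering
 */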
4629
4630/*
4631 *      Upload unicast and multicast address lists to device and
4632 *      configure RX filtering. When the device doesn't support unicast
4633 *      filtering it is put in promiscuous mode while unicast addresses
4634 *      are present.
4635 */
4636void __dev_set_rx_mode(struct net_device *dev)
4637{
4638        const struct net_device_ops *ops = dev->netdev_ops;
4639
4640        /* dev_open will call this function so the list will stay sane. */
4641        if (!(dev->flags&IFF_UP))
4642                return;
4643
4644        if (!netif_device_present(dev))
4645                return;
4646
4647        if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4648                /* Unicast address changes may only happen under the rtnl,
4649                 * therefore calling __dev_set_promiscuity here is safe.
4650                 */
4651                if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4652                        __dev_set_promiscuity(dev, 1);
4653                        dev->uc_promisc = true;
4654                } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4655                        __dev_set_promiscuity(dev, -1);
4656                        dev->uc_promisc = false;
4657                }
4658        }
4659
4660        if (ops->ndo_set_rx_mode)
4661                ops->ndo_set_rx_mode(dev);
4662}
4663
4664void dev_set_rx_mode(struct net_device *dev)
4665{
4666        netif_addr_lock_bh(dev);
4667        __dev_set_rx_mode(dev);
4668        netif_addr_unlock_bh(dev);
4669}
4670
4671/**
4672 *      dev_get_flags - get flags reported to userspace
4673 *      @dev: device
4674 *
4675 *      Get the combination of flag bits exported through APIs to userspace.
4676 */
4677unsigned int dev_get_flags(const struct net_device *dev)
4678{
4679        unsigned int flags;
4680
4681        flags = (dev->flags & ~(IFF_PROMISC |
4682                                IFF_ALLMULTI |
4683                                IFF_RUNNING |
4684                                IFF_LOWER_UP |
4685                                IFF_DORMANT)) |
4686                (dev->gflags & (IFF_PROMISC |
4687                                IFF_ALLMULTI));
4688
4689        if (netif_running(dev)) {
4690                if (netif_oper_up(dev))
4691                        flags |= IFF_RUNNING;
4692                if (netif_carrier_ok(dev))
4693                        flags |= IFF_LOWER_UP;
4694                if (netif_dormant(dev))
4695                        flags |= IFF_DORMANT;
4696        }
4697
4698        return flags;
4699}
4700EXPORT_SYMBOL(dev_get_flags);
4701
4702int __dev_change_flags(struct net_device *dev, unsigned int flags)
4703{
4704        unsigned int old_flags = dev->flags;
4705        int ret;
4706
4707        ASSERT_RTNL();
4708
4709        /*
4710         *      Set the flags on our device.
4711         */
4712
4713        dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4714                               IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4715                               IFF_AUTOMEDIA)) |
4716                     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4717                                    IFF_ALLMULTI));
4718
4719        /*
4720         *      Load in the correct multicast list now the flags have changed.
4721         */
4722
4723        if ((old_flags ^ flags) & IFF_MULTICAST)
4724                dev_change_rx_flags(dev, IFF_MULTICAST);
4725
4726        dev_set_rx_mode(dev);
4727
4728        /*
4729         *      Have we downed the interface? We handle IFF_UP ourselves
4730         *      according to user attempts to set it, rather than blindly
4731         *      setting it.
4732         */
4733
4734        ret = 0;
4735        if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
4736                ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4737
4738                if (!ret)
4739                        dev_set_rx_mode(dev);
4740        }
4741
4742        if ((flags ^ dev->gflags) & IFF_PROMISC) {
4743                int inc = (flags & IFF_PROMISC) ? 1 : -1;
4744
4745                dev->gflags ^= IFF_PROMISC;
4746                dev_set_promiscuity(dev, inc);
4747        }
4748
4749        /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4750           is important. Some (broken) drivers set IFF_PROMISC when
4751           IFF_ALLMULTI is requested, without asking us and without reporting it.
4752         */
4753        if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4754                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4755
4756                dev->gflags ^= IFF_ALLMULTI;
4757                dev_set_allmulti(dev, inc);
4758        }
4759
4760        return ret;
4761}
4762
4763void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4764{
4765        unsigned int changes = dev->flags ^ old_flags;
4766
4767        if (changes & IFF_UP) {
4768                if (dev->flags & IFF_UP)
4769                        call_netdevice_notifiers(NETDEV_UP, dev);
4770                else
4771                        call_netdevice_notifiers(NETDEV_DOWN, dev);
4772        }
4773
4774        if (dev->flags & IFF_UP &&
4775            (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4776                call_netdevice_notifiers(NETDEV_CHANGE, dev);
4777}
4778
4779/**
4780 *      dev_change_flags - change device settings
4781 *      @dev: device
4782 *      @flags: device state flags
4783 *
4784 *      Change settings on the device based on the given state flags. The
4785 *      flags are in the userspace exported format.
4786 */
4787int dev_change_flags(struct net_device *dev, unsigned int flags)
4788{
4789        int ret;
4790        unsigned int changes, old_flags = dev->flags;
4791
4792        ret = __dev_change_flags(dev, flags);
4793        if (ret < 0)
4794                return ret;
4795
4796        changes = old_flags ^ dev->flags;
4797        if (changes)
4798                rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4799
4800        __dev_notify_flags(dev, old_flags);
4801        return ret;
4802}
4803EXPORT_SYMBOL(dev_change_flags);
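/*
 *	Example (illustrative sketch, not part of this file): kernel code that
 *	already holds a reference to a device can bring it up through
 *	dev_change_flags(), which expects the RTNL to be held by the caller:
 *
 *		int err;
 *
 *		rtnl_lock();
 *		err = dev_change_flags(dev, dev->flags | IFF_UP);
 *		rtnl_unlock();
 *
 *	This is equivalent to what a SIOCSIFFLAGS ioctl with IFF_UP set does.
 */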
4804
4805/**
4806 *      dev_set_mtu - Change maximum transmission unit
4807 *      @dev: device
4808 *      @new_mtu: new transmission unit
4809 *
4810 *      Change the maximum transmission unit (MTU) of the network device.
4811 */
4812int dev_set_mtu(struct net_device *dev, int new_mtu)
4813{
4814        const struct net_device_ops *ops = dev->netdev_ops;
4815        int err;
4816
4817        if (new_mtu == dev->mtu)
4818                return 0;
4819
4820        /*      MTU must not be negative.       */
4821        if (new_mtu < 0)
4822                return -EINVAL;
4823
4824        if (!netif_device_present(dev))
4825                return -ENODEV;
4826
4827        err = 0;
4828        if (ops->ndo_change_mtu)
4829                err = ops->ndo_change_mtu(dev, new_mtu);
4830        else
4831                dev->mtu = new_mtu;
4832
4833        if (!err && dev->flags & IFF_UP)
4834                call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4835        return err;
4836}
4837EXPORT_SYMBOL(dev_set_mtu);
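/*
 *	Example (illustrative sketch, not part of this file): dev_set_mtu() defers
 *	range checking beyond "non-negative" to the driver's ndo_change_mtu
 *	callback. A typical implementation, with EXAMPLE_MAX_MTU as a
 *	hypothetical hardware limit:
 *
 *		static int example_change_mtu(struct net_device *dev, int new_mtu)
 *		{
 *			if (new_mtu < 68 || new_mtu > EXAMPLE_MAX_MTU)
 *				return -EINVAL;
 *			/* resize hardware rx buffers here if required */
 *			dev->mtu = new_mtu;
 *			return 0;
 *		}
 *
 *	68 bytes is the minimum MTU required by IPv4.
 */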
4838
4839/**
4840 *      dev_set_group - Change group this device belongs to
4841 *      @dev: device
4842 *      @new_group: group this device should belong to
4843 */
4844void dev_set_group(struct net_device *dev, int new_group)
4845{
4846        dev->group = new_group;
4847}
4848EXPORT_SYMBOL(dev_set_group);
4849
4850/**
4851 *      dev_set_mac_address - Change Media Access Control Address
4852 *      @dev: device
4853 *      @sa: new address
4854 *
4855 *      Change the hardware (MAC) address of the device
4856 */
4857int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4858{
4859        const struct net_device_ops *ops = dev->netdev_ops;
4860        int err;
4861
4862        if (!ops->ndo_set_mac_address)
4863                return -EOPNOTSUPP;
4864        if (sa->sa_family != dev->type)
4865                return -EINVAL;
4866        if (!netif_device_present(dev))
4867                return -ENODEV;
4868        err = ops->ndo_set_mac_address(dev, sa);
4869        if (!err)
4870                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4871        add_device_randomness(dev->dev_addr, dev->addr_len);
4872        return err;
4873}
4874EXPORT_SYMBOL(dev_set_mac_address);
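/*
 *	Example (illustrative sketch, not part of this file): the driver side of
 *	dev_set_mac_address() is ndo_set_mac_address, which validates the new
 *	address, copies it into dev->dev_addr and programs the hardware. The
 *	example_* names are hypothetical:
 *
 *		static int example_set_mac_address(struct net_device *dev, void *p)
 *		{
 *			struct sockaddr *addr = p;
 *
 *			if (!is_valid_ether_addr(addr->sa_data))
 *				return -EADDRNOTAVAIL;
 *			memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 *			example_hw_write_mac(netdev_priv(dev), dev->dev_addr);
 *			return 0;
 *		}
 *
 *	Ethernet drivers without special requirements can simply use the
 *	generic eth_mac_addr() helper instead.
 */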
4875
4876/*
4877 *      Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4878 */
4879static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4880{
4881        int err;
4882        struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4883
4884        if (!dev)
4885                return -ENODEV;
4886
4887        switch (cmd) {
4888        case SIOCGIFFLAGS:      /* Get interface flags */
4889                ifr->ifr_flags = (short) dev_get_flags(dev);
4890                return 0;
4891
4892        case SIOCGIFMETRIC:     /* Get the metric on the interface
4893                                   (currently unused) */
4894                ifr->ifr_metric = 0;
4895                return 0;
4896
4897        case SIOCGIFMTU:        /* Get the MTU of a device */
4898                ifr->ifr_mtu = dev->mtu;
4899                return 0;
4900
4901        case SIOCGIFHWADDR:
4902                if (!dev->addr_len)
4903                        memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4904                else
4905                        memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4906                               min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4907                ifr->ifr_hwaddr.sa_family = dev->type;
4908                return 0;
4909
4910        case SIOCGIFSLAVE:
4911                err = -EINVAL;
4912                break;
4913
4914        case SIOCGIFMAP:
4915                ifr->ifr_map.mem_start = dev->mem_start;
4916                ifr->ifr_map.mem_end   = dev->mem_end;
4917                ifr->ifr_map.base_addr = dev->base_addr;
4918                ifr->ifr_map.irq       = dev->irq;
4919                ifr->ifr_map.dma       = dev->dma;
4920                ifr->ifr_map.port      = dev->if_port;
4921                return 0;
4922
4923        case SIOCGIFINDEX:
4924                ifr->ifr_ifindex = dev->ifindex;
4925                return 0;
4926
4927        case SIOCGIFTXQLEN:
4928                ifr->ifr_qlen = dev->tx_queue_len;
4929                return 0;
4930
4931        default:
4932                /* dev_ioctl() should ensure this case
4933                 * is never reached
4934                 */
4935                WARN_ON(1);
4936                err = -ENOTTY;
4937                break;
4938
4939        }
4940        return err;
4941}
4942
4943/*
4944 *      Perform the SIOCxIFxxx calls, inside rtnl_lock()
4945 */
4946static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4947{
4948        int err;
4949        struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4950        const struct net_device_ops *ops;
4951
4952        if (!dev)
4953                return -ENODEV;
4954
4955        ops = dev->netdev_ops;
4956
4957        switch (cmd) {
4958        case SIOCSIFFLAGS:      /* Set interface flags */
4959                return dev_change_flags(dev, ifr->ifr_flags);
4960
4961        case SIOCSIFMETRIC:     /* Set the metric on the interface
4962                                   (currently unused) */
4963                return -EOPNOTSUPP;
4964
4965        case SIOCSIFMTU:        /* Set the MTU of a device */
4966                return dev_set_mtu(dev, ifr->ifr_mtu);
4967
4968        case SIOCSIFHWADDR:
4969                return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4970
4971        case SIOCSIFHWBROADCAST:
4972                if (ifr->ifr_hwaddr.sa_family != dev->type)
4973                        return -EINVAL;
4974                memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4975                       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4976                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4977                return 0;
4978
4979        case SIOCSIFMAP:
4980                if (ops->ndo_set_config) {
4981                        if (!netif_device_present(dev))
4982                                return -ENODEV;
4983                        return ops->ndo_set_config(dev, &ifr->ifr_map);
4984                }
4985                return -EOPNOTSUPP;
4986
4987        case SIOCADDMULTI:
4988                if (!ops->ndo_set_rx_mode ||
4989                    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4990                        return -EINVAL;
4991                if (!netif_device_present(dev))
4992                        return -ENODEV;
4993                return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4994
4995        case SIOCDELMULTI:
4996                if (!ops->ndo_set_rx_mode ||
4997                    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4998                        return -EINVAL;
4999                if (!netif_device_present(dev))
5000                        return -ENODEV;
5001                return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5002
5003        case SIOCSIFTXQLEN:
5004                if (ifr->ifr_qlen < 0)
5005                        return -EINVAL;
5006                dev->tx_queue_len = ifr->ifr_qlen;
5007                return 0;
5008
5009        case SIOCSIFNAME:
5010                ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5011                return dev_change_name(dev, ifr->ifr_newname);
5012
5013        case SIOCSHWTSTAMP:
5014                err = net_hwtstamp_validate(ifr);
5015                if (err)
5016                        return err;
5017                /* fall through */
5018
5019        /*
5020         *      Unknown or private ioctl
5021         */
5022        default:
5023                if ((cmd >= SIOCDEVPRIVATE &&
5024                    cmd <= SIOCDEVPRIVATE + 15) ||
5025                    cmd == SIOCBONDENSLAVE ||
5026                    cmd == SIOCBONDRELEASE ||
5027                    cmd == SIOCBONDSETHWADDR ||
5028                    cmd == SIOCBONDSLAVEINFOQUERY ||
5029                    cmd == SIOCBONDINFOQUERY ||
5030                    cmd == SIOCBONDCHANGEACTIVE ||
5031                    cmd == SIOCGMIIPHY ||
5032                    cmd == SIOCGMIIREG ||
5033                    cmd == SIOCSMIIREG ||
5034                    cmd == SIOCBRADDIF ||
5035                    cmd == SIOCBRDELIF ||
5036                    cmd == SIOCSHWTSTAMP ||
5037                    cmd == SIOCWANDEV) {
5038                        err = -EOPNOTSUPP;
5039                        if (ops->ndo_do_ioctl) {
5040                                if (netif_device_present(dev))
5041                                        err = ops->ndo_do_ioctl(dev, ifr, cmd);
5042                                else
5043                                        err = -ENODEV;
5044                        }
5045                } else
5046                        err = -EINVAL;
5047
5048        }
5049        return err;
5050}
5051
5052/*
5053 *      This function handles all "interface"-type I/O control requests. The actual
5054 *      'doing' part of this is dev_ifsioc above.
5055 */
5056
5057/**
5058 *      dev_ioctl       -       network device ioctl
5059 *      @net: the applicable net namespace
5060 *      @cmd: command to issue
5061 *      @arg: pointer to a struct ifreq in user space
5062 *
5063 *      Issue ioctl functions to devices. This is normally called by the
5064 *      user space syscall interfaces but can sometimes be useful for
5065 *      other purposes. The return value is the return from the syscall if
5066 *      positive or a negative errno code on error.
5067 */
5068
5069int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5070{
5071        struct ifreq ifr;
5072        int ret;
5073        char *colon;
5074
5075        /* One special case: SIOCGIFCONF takes an ifconf argument
5076           and requires a shared lock, because it sleeps while writing
5077           to user space.
5078         */
5079
5080        if (cmd == SIOCGIFCONF) {
5081                rtnl_lock();
5082                ret = dev_ifconf(net, (char __user *) arg);
5083                rtnl_unlock();
5084                return ret;
5085        }
5086        if (cmd == SIOCGIFNAME)
5087                return dev_ifname(net, (struct ifreq __user *)arg);
5088
5089        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5090                return -EFAULT;
5091
5092        ifr.ifr_name[IFNAMSIZ-1] = 0;
5093
5094        colon = strchr(ifr.ifr_name, ':');
5095        if (colon)
5096                *colon = 0;
5097
5098        /*
5099         *      See which interface the caller is talking about.
5100         */
5101
5102        switch (cmd) {
5103        /*
5104         *      These ioctl calls:
5105         *      - can be done by all.
5106         *      - are atomic and do not require locking.
5107         *      - return a value
5108         */
5109        case SIOCGIFFLAGS:
5110        case SIOCGIFMETRIC:
5111        case SIOCGIFMTU:
5112        case SIOCGIFHWADDR:
5113        case SIOCGIFSLAVE:
5114        case SIOCGIFMAP:
5115        case SIOCGIFINDEX:
5116        case SIOCGIFTXQLEN:
5117                dev_load(net, ifr.ifr_name);
5118                rcu_read_lock();
5119                ret = dev_ifsioc_locked(net, &ifr, cmd);
5120                rcu_read_unlock();
5121                if (!ret) {
5122                        if (colon)
5123                                *colon = ':';
5124                        if (copy_to_user(arg, &ifr,
5125                                         sizeof(struct ifreq)))
5126                                ret = -EFAULT;
5127                }
5128                return ret;
5129
5130        case SIOCETHTOOL:
5131                dev_load(net, ifr.ifr_name);
5132                rtnl_lock();
5133                ret = dev_ethtool(net, &ifr);
5134                rtnl_unlock();
5135                if (!ret) {
5136                        if (colon)
5137                                *colon = ':';
5138                        if (copy_to_user(arg, &ifr,
5139                                         sizeof(struct ifreq)))
5140                                ret = -EFAULT;
5141                }
5142                return ret;
5143
5144        /*
5145         *      These ioctl calls:
5146         *      - require superuser power.
5147         *      - require strict serialization.
5148         *      - return a value
5149         */
5150        case SIOCGMIIPHY:
5151        case SIOCGMIIREG:
5152        case SIOCSIFNAME:
5153                if (!capable(CAP_NET_ADMIN))
5154                        return -EPERM;
5155                dev_load(net, ifr.ifr_name);
5156                rtnl_lock();
5157                ret = dev_ifsioc(net, &ifr, cmd);
5158                rtnl_unlock();
5159                if (!ret) {
5160                        if (colon)
5161                                *colon = ':';
5162                        if (copy_to_user(arg, &ifr,
5163                                         sizeof(struct ifreq)))
5164                                ret = -EFAULT;
5165                }
5166                return ret;
5167
5168        /*
5169         *      These ioctl calls:
5170         *      - require superuser power.
5171         *      - require strict serialization.
5172         *      - do not return a value
5173         */
5174        case SIOCSIFFLAGS:
5175        case SIOCSIFMETRIC:
5176        case SIOCSIFMTU:
5177        case SIOCSIFMAP:
5178        case SIOCSIFHWADDR:
5179        case SIOCSIFSLAVE:
5180        case SIOCADDMULTI:
5181        case SIOCDELMULTI:
5182        case SIOCSIFHWBROADCAST:
5183        case SIOCSIFTXQLEN:
5184        case SIOCSMIIREG:
5185        case SIOCBONDENSLAVE:
5186        case SIOCBONDRELEASE:
5187        case SIOCBONDSETHWADDR:
5188        case SIOCBONDCHANGEACTIVE:
5189        case SIOCBRADDIF:
5190        case SIOCBRDELIF:
5191        case SIOCSHWTSTAMP:
5192                if (!capable(CAP_NET_ADMIN))
5193                        return -EPERM;
5194                /* fall through */
5195        case SIOCBONDSLAVEINFOQUERY:
5196        case SIOCBONDINFOQUERY:
5197                dev_load(net, ifr.ifr_name);
5198                rtnl_lock();
5199                ret = dev_ifsioc(net, &ifr, cmd);
5200                rtnl_unlock();
5201                return ret;
5202
5203        case SIOCGIFMEM:
5204                /* Get the per device memory space. We can add this but
5205                 * currently do not support it */
5206        case SIOCSIFMEM:
5207                /* Set the per device memory buffer space.
5208                 * Not applicable in our case */
5209        case SIOCSIFLINK:
5210                return -ENOTTY;
5211
5212        /*
5213         *      Unknown or private ioctl.
5214         */
5215        default:
5216                if (cmd == SIOCWANDEV ||
5217                    (cmd >= SIOCDEVPRIVATE &&
5218                     cmd <= SIOCDEVPRIVATE + 15)) {
5219                        dev_load(net, ifr.ifr_name);
5220                        rtnl_lock();
5221                        ret = dev_ifsioc(net, &ifr, cmd);
5222                        rtnl_unlock();
5223                        if (!ret && copy_to_user(arg, &ifr,
5224                                                 sizeof(struct ifreq)))
5225                                ret = -EFAULT;
5226                        return ret;
5227                }
5228                /* Take care of Wireless Extensions */
5229                if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5230                        return wext_handle_ioctl(net, &ifr, cmd, arg);
5231                return -ENOTTY;
5232        }
5233}
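/*
 *	Example (illustrative sketch, not part of this file): a SIOCSIFMTU
 *	request travels through dev_ioctl() and dev_ifsioc() above into
 *	dev_set_mtu(). From userspace (requires CAP_NET_ADMIN; "eth0" and the
 *	value 1400 are arbitrary):
 *
 *		#include <string.h>
 *		#include <unistd.h>
 *		#include <sys/ioctl.h>
 *		#include <sys/socket.h>
 *		#include <net/if.h>
 *
 *		int example_set_mtu(void)
 *		{
 *			struct ifreq ifr;
 *			int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *			int err;
 *
 *			if (fd < 0)
 *				return -1;
 *			memset(&ifr, 0, sizeof(ifr));
 *			strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *			ifr.ifr_mtu = 1400;
 *			err = ioctl(fd, SIOCSIFMTU, &ifr);
 *			close(fd);
 *			return err;
 *		}
 */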
5234
5235
5236/**
5237 *      dev_new_index   -       allocate an ifindex
5238 *      @net: the applicable net namespace
5239 *
5240 *      Returns a suitable unique value for a new device interface
5241 *      number.  The caller must hold the rtnl semaphore or the
5242 *      dev_base_lock to be sure it remains unique.
5243 */
5244static int dev_new_index(struct net *net)
5245{
5246        static int ifindex;
5247        for (;;) {
5248                if (++ifindex <= 0)
5249                        ifindex = 1;
5250                if (!__dev_get_by_index(net, ifindex))
5251                        return ifindex;
5252        }
5253}
5254
5255/* Delayed registration/unregistration */
5256static LIST_HEAD(net_todo_list);
5257
5258static void net_set_todo(struct net_device *dev)
5259{
5260        list_add_tail(&dev->todo_list, &net_todo_list);
5261}
5262
5263static void rollback_registered_many(struct list_head *head)
5264{
5265        struct net_device *dev, *tmp;
5266
5267        BUG_ON(dev_boot_phase);
5268        ASSERT_RTNL();
5269
5270        list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5271                /* Some devices call unregister without ever having
5272                 * registered, as part of initialization unwind. Remove
5273                 * those devices and proceed with the remaining ones.
5274                 */
5275                if (dev->reg_state == NETREG_UNINITIALIZED) {
5276                        pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5277                                 dev->name, dev);
5278
5279                        WARN_ON(1);
5280                        list_del(&dev->unreg_list);
5281                        continue;
5282                }
5283                dev->dismantle = true;
5284                BUG_ON(dev->reg_state != NETREG_REGISTERED);
5285        }
5286
5287        /* If device is running, close it first. */
5288        dev_close_many(head);
5289
5290        list_for_each_entry(dev, head, unreg_list) {
5291                /* And unlink it from device chain. */
5292                unlist_netdevice(dev);
5293
5294                dev->reg_state = NETREG_UNREGISTERING;
5295        }
5296
5297        synchronize_net();
5298
5299        list_for_each_entry(dev, head, unreg_list) {
5300                /* Shutdown queueing discipline. */
5301                dev_shutdown(dev);
5302
5303
5304                /* Notify protocols that we are about to destroy
5305                   this device. They should clean up all of their state.
5306                */
5307                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5308
5309                if (!dev->rtnl_link_ops ||
5310                    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5311                        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5312
5313                /*
5314                 *      Flush the unicast and multicast chains
5315                 */
5316                dev_uc_flush(dev);
5317                dev_mc_flush(dev);
5318
5319                if (dev->netdev_ops->ndo_uninit)
5320                        dev->netdev_ops->ndo_uninit(dev);
5321
5322                /* Notifier chain MUST detach us from master device. */
5323                WARN_ON(dev->master);
5324
5325                /* Remove entries from kobject tree */
5326                netdev_unregister_kobject(dev);
5327        }
5328
5329        /* Process any work delayed until the end of the batch */
5330        dev = list_first_entry(head, struct net_device, unreg_list);
5331        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5332
5333        synchronize_net();
5334
5335        list_for_each_entry(dev, head, unreg_list)
5336                dev_put(dev);
5337}
5338
5339static void rollback_registered(struct net_device *dev)
5340{
5341        LIST_HEAD(single);
5342
5343        list_add(&dev->unreg_list, &single);
5344        rollback_registered_many(&single);
5345        list_del(&single);
5346}
5347
5348static netdev_features_t netdev_fix_features(struct net_device *dev,
5349        netdev_features_t features)
5350{
5351        /* Fix illegal checksum combinations */
5352        if ((features & NETIF_F_HW_CSUM) &&
5353            (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5354                netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5355                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5356        }
5357
5358        /* Fix illegal SG+CSUM combinations. */
5359        if ((features & NETIF_F_SG) &&
5360            !(features & NETIF_F_ALL_CSUM)) {
5361                netdev_dbg(dev,
5362                        "Dropping NETIF_F_SG since no checksum feature.\n");
5363                features &= ~NETIF_F_SG;
5364        }
5365
5366        /* TSO requires that SG is present as well. */
5367        if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5368                netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5369                features &= ~NETIF_F_ALL_TSO;
5370        }
5371
5372        /* TSO ECN requires that TSO is present as well. */
5373        if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5374                features &= ~NETIF_F_TSO_ECN;
5375
5376        /* Software GSO depends on SG. */
5377        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5378                netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5379                features &= ~NETIF_F_GSO;
5380        }
5381
5382        /* UFO needs SG and checksumming */
5383        if (features & NETIF_F_UFO) {
5384                /* maybe split UFO into V4 and V6? */
5385                if (!((features & NETIF_F_GEN_CSUM) ||
5386                    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5387                            == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5388                        netdev_dbg(dev,
5389                                "Dropping NETIF_F_UFO since no checksum offload features.\n");
5390                        features &= ~NETIF_F_UFO;
5391                }
5392
5393                if (!(features & NETIF_F_SG)) {
5394                        netdev_dbg(dev,
5395                                "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5396                        features &= ~NETIF_F_UFO;
5397                }
5398        }
5399
5400        return features;
5401}
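/*
 *	Example (illustrative sketch, not part of this file):
 *	netdev_fix_features() enforces the generic feature dependencies; device
 *	specific constraints belong in the driver's ndo_fix_features callback,
 *	which runs just before it. The 4000 byte limit below is a hypothetical
 *	hardware restriction:
 *
 *		static netdev_features_t example_fix_features(struct net_device *dev,
 *							      netdev_features_t features)
 *		{
 *			/* this (imaginary) hardware cannot segment jumbo frames */
 *			if (dev->mtu > 4000)
 *				features &= ~NETIF_F_ALL_TSO;
 *			return features;
 *		}
 */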
5402
5403int __netdev_update_features(struct net_device *dev)
5404{
5405        netdev_features_t features;
5406        int err = 0;
5407
5408        ASSERT_RTNL();
5409
5410        features = netdev_get_wanted_features(dev);
5411
5412        if (dev->netdev_ops->ndo_fix_features)
5413                features = dev->netdev_ops->ndo_fix_features(dev, features);
5414
5415        /* driver might be less strict about feature dependencies */
5416        features = netdev_fix_features(dev, features);
5417
5418        if (dev->features == features)
5419                return 0;
5420
5421        netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5422                &dev->features, &features);
5423
5424        if (dev->netdev_ops->ndo_set_features)
5425                err = dev->netdev_ops->ndo_set_features(dev, features);
5426
5427        if (unlikely(err < 0)) {
5428                netdev_err(dev,
5429                        "set_features() failed (%d); wanted %pNF, left %pNF\n",
5430                        err, &features, &dev->features);
5431                return -1;
5432        }
5433
5434        if (!err)
5435                dev->features = features;
5436
5437        return 1;
5438}
5439
5440/**
5441 *      netdev_update_features - recalculate device features
5442 *      @dev: the device to check
5443 *
5444 *      Recalculate the dev->features set and send notifications if it
5445 *      has changed. Should be called after driver or hardware dependent
5446 *      conditions that influence the features might have changed.
5447 */
5448void netdev_update_features(struct net_device *dev)
5449{
5450        if (__netdev_update_features(dev))
5451                netdev_features_change(dev);
5452}
5453EXPORT_SYMBOL(netdev_update_features);
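/*
 *	Example (illustrative sketch, not part of this file): a driver whose
 *	feature set depends on a runtime condition, such as the MTU constraint
 *	in the previous sketch, re-evaluates it from its ndo_change_mtu handler,
 *	which already runs under the RTNL:
 *
 *		dev->mtu = new_mtu;
 *		netdev_update_features(dev);	/* re-runs ndo_fix_features() */
 */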
5454
5455/**
5456 *      netdev_change_features - recalculate device features
5457 *      @dev: the device to check
5458 *
5459 *      Recalculate dev->features set and send notifications even
5460 *      if they have not changed. Should be called instead of
5461 *      netdev_update_features() if dev->vlan_features might also
5462 *      have changed, to allow the changes to be propagated to stacked
5463 *      VLAN devices.
5464 */
5465void netdev_change_features(struct net_device *dev)
5466{
5467        __netdev_update_features(dev);
5468        netdev_features_change(dev);
5469}
5470EXPORT_SYMBOL(netdev_change_features);
5471
5472/**
5473 *      netif_stacked_transfer_operstate -      transfer operstate
5474 *      @rootdev: the root or lower level device to transfer state from
5475 *      @dev: the device to transfer operstate to
5476 *
5477 *      Transfer operational state from root to device. This is normally
5478 *      called when a stacking relationship exists between the root
5479 *      device and the device (a leaf device).
5480 */
5481void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5482                                        struct net_device *dev)
5483{
5484        if (rootdev->operstate == IF_OPER_DORMANT)
5485                netif_dormant_on(dev);
5486        else
5487                netif_dormant_off(dev);
5488
5489        if (netif_carrier_ok(rootdev)) {
5490                if (!netif_carrier_ok(dev))
5491                        netif_carrier_on(dev);
5492        } else {
5493                if (netif_carrier_ok(dev))
5494                        netif_carrier_off(dev);
5495        }
5496}
5497EXPORT_SYMBOL(netif_stacked_transfer_operstate);
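/*
 *	Example (illustrative sketch, not part of this file): a stacking driver
 *	(VLAN-like) mirrors the lower device's state from its netdevice
 *	notifier. example_find_upper() is a hypothetical lookup of the stacked
 *	device; in this kernel the notifier's ptr argument is the net_device:
 *
 *		static int example_netdev_event(struct notifier_block *unused,
 *						unsigned long event, void *ptr)
 *		{
 *			struct net_device *lower = ptr;
 *			struct net_device *upper = example_find_upper(lower);
 *
 *			if (upper && event == NETDEV_CHANGE)
 *				netif_stacked_transfer_operstate(lower, upper);
 *			return NOTIFY_DONE;
 *		}
 */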
5498
5499#ifdef CONFIG_RPS
5500static int netif_alloc_rx_queues(struct net_device *dev)
5501{
5502        unsigned int i, count = dev->num_rx_queues;
5503        struct netdev_rx_queue *rx;
5504
5505        BUG_ON(count < 1);
5506
5507        rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5508        if (!rx) {
5509                pr_err("netdev: Unable to allocate %u rx queues\n", count);
5510                return -ENOMEM;
5511        }
5512        dev->_rx = rx;
5513
5514        for (i = 0; i < count; i++)
5515                rx[i].dev = dev;
5516        return 0;
5517}
5518#endif
5519
5520static void netdev_init_one_queue(struct net_device *dev,
5521                                  struct netdev_queue *queue, void *_unused)
5522{
5523        /* Initialize queue lock */
5524        spin_lock_init(&queue->_xmit_lock);
5525        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5526        queue->xmit_lock_owner = -1;
5527        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5528        queue->dev = dev;
5529#ifdef CONFIG_BQL
5530        dql_init(&queue->dql, HZ);
5531#endif
5532}
5533
5534static int netif_alloc_netdev_queues(struct net_device *dev)
5535{
5536        unsigned int count = dev->num_tx_queues;
5537        struct netdev_queue *tx;
5538
5539        BUG_ON(count < 1);
5540
5541        tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5542        if (!tx) {
5543                pr_err("netdev: Unable to allocate %u tx queues\n", count);
5544                return -ENOMEM;
5545        }
5546        dev->_tx = tx;
5547
5548        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5549        spin_lock_init(&dev->tx_global_lock);
5550
5551        return 0;
5552}
5553
5554/**
5555 *      register_netdevice      - register a network device
5556 *      @dev: device to register
5557 *
5558 *      Take a completed network device structure and add it to the kernel
5559 *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5560 *      chain. 0 is returned on success. A negative errno code is returned
5561 *      on a failure to set up the device, or if the name is a duplicate.
5562 *
5563 *      Callers must hold the rtnl semaphore. You may want
5564 *      register_netdev() instead of this.
5565 *
5566 *      BUGS:
5567 *      The locking appears insufficient to guarantee two parallel registers
5568 *      will not get the same name.
5569 */
5570
5571int register_netdevice(struct net_device *dev)
5572{
5573        int ret;
5574        struct net *net = dev_net(dev);
5575
5576        BUG_ON(dev_boot_phase);
5577        ASSERT_RTNL();
5578
5579        might_sleep();
5580
5581        /* When net_devices are persistent, this will be fatal. */
5582        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5583        BUG_ON(!net);
5584
5585        spin_lock_init(&dev->addr_list_lock);
5586        netdev_set_addr_lockdep_class(dev);
5587
5588        dev->iflink = -1;
5589
5590        ret = dev_get_valid_name(dev, dev->name);
5591        if (ret < 0)
5592                goto out;
5593
5594        /* Init, if this function is available */
5595        if (dev->netdev_ops->ndo_init) {
5596                ret = dev->netdev_ops->ndo_init(dev);
5597                if (ret) {
5598                        if (ret > 0)
5599                                ret = -EIO;
5600                        goto out;
5601                }
5602        }
5603
5604        dev->ifindex = dev_new_index(net);
5605        if (dev->iflink == -1)
5606                dev->iflink = dev->ifindex;
5607
5608        /* Transfer changeable features to wanted_features and enable
5609         * software offloads (GSO and GRO).
5610         */
5611        dev->hw_features |= NETIF_F_SOFT_FEATURES;
5612        dev->features |= NETIF_F_SOFT_FEATURES;
5613        dev->wanted_features = dev->features & dev->hw_features;
5614
5615        /* Turn on no cache copy if HW is doing checksum */
5616        if (!(dev->flags & IFF_LOOPBACK)) {
5617                dev->hw_features |= NETIF_F_NOCACHE_COPY;
5618                if (dev->features & NETIF_F_ALL_CSUM) {
5619                        dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5620                        dev->features |= NETIF_F_NOCACHE_COPY;
5621                }
5622        }
5623
5624        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5625         */
5626        dev->vlan_features |= NETIF_F_HIGHDMA;
5627
5628        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5629        ret = notifier_to_errno(ret);
5630        if (ret)
5631                goto err_uninit;
5632
5633        ret = netdev_register_kobject(dev);
5634        if (ret)
5635                goto err_uninit;
5636        dev->reg_state = NETREG_REGISTERED;
5637
5638        __netdev_update_features(dev);
5639
5640        /*
5641         *      Default initial state at registration is that the
5642         *      device is present.
5643         */
5644
5645        set_bit(__LINK_STATE_PRESENT, &dev->state);
5646
5647        dev_init_scheduler(dev);
5648        dev_hold(dev);
5649        list_netdevice(dev);
5650        add_device_randomness(dev->dev_addr, dev->addr_len);
5651
5652        /* Notify protocols that a new device appeared. */
5653        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5654        ret = notifier_to_errno(ret);
5655        if (ret) {
5656                rollback_registered(dev);
5657                dev->reg_state = NETREG_UNREGISTERED;
5658        }
5659        /*
5660         *      Prevent userspace races by waiting until the network
5661         *      device is fully set up before sending notifications.
5662         */
5663        if (!dev->rtnl_link_ops ||
5664            dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5665                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5666
5667out:
5668        return ret;
5669
5670err_uninit:
5671        if (dev->netdev_ops->ndo_uninit)
5672                dev->netdev_ops->ndo_uninit(dev);
5673        goto out;
5674}
5675EXPORT_SYMBOL(register_netdevice);
5676
5677/**
5678 *      init_dummy_netdev       - init a dummy network device for NAPI
5679 *      @dev: device to init
5680 *
5681 *      This takes a network device structure and initializes the minimum
5682 *      number of fields so it can be used to schedule NAPI polls without
5683 *      registering a full-blown interface. This is to be used by drivers
5684 *      that need to tie several hardware interfaces to a single NAPI
5685 *      poll scheduler due to HW limitations.
5686 */
5687int init_dummy_netdev(struct net_device *dev)
5688{
5689        /* Clear everything. Note we don't initialize spinlocks
5690         * as they aren't supposed to be taken by any of the
5691         * NAPI code and this dummy netdev is supposed to be
5692         * only ever used for NAPI polls
5693         */
5694        memset(dev, 0, sizeof(struct net_device));
5695
5696        /* make sure we BUG if trying to hit standard
5697         * register/unregister code path
5698         */
5699        dev->reg_state = NETREG_DUMMY;
5700
5701        /* NAPI wants this */
5702        INIT_LIST_HEAD(&dev->napi_list);
5703
5704        /* a dummy interface is started by default */
5705        set_bit(__LINK_STATE_PRESENT, &dev->state);
5706        set_bit(__LINK_STATE_START, &dev->state);
5707
5708        /* Note: We don't allocate pcpu_refcnt for dummy devices,
5709         * because users of this 'device' don't need to change
5710         * its refcount.
5711         */
5712
5713        return 0;
5714}
5715EXPORT_SYMBOL_GPL(init_dummy_netdev);
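/*
 *	Example (illustrative sketch, not part of this file): a driver that owns
 *	several hardware interfaces but only one interrupt/poll source can embed
 *	a dummy netdev purely to anchor its NAPI context. The example_* names
 *	are hypothetical:
 *
 *		struct example_hw {
 *			struct net_device napi_dev;	/* never registered */
 *			struct napi_struct napi;
 *		};
 *
 *		static int example_setup_napi(struct example_hw *hw)
 *		{
 *			init_dummy_netdev(&hw->napi_dev);
 *			netif_napi_add(&hw->napi_dev, &hw->napi, example_poll, 64);
 *			napi_enable(&hw->napi);
 *			return 0;
 *		}
 */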
5716
5717
5718/**
5719 *      register_netdev - register a network device
5720 *      @dev: device to register
5721 *
5722 *      Take a completed network device structure and add it to the kernel
5723 *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5724 *      chain. 0 is returned on success. A negative errno code is returned
5725 *      on a failure to set up the device, or if the name is a duplicate.
5726 *
5727 *      This is a wrapper around register_netdevice that takes the rtnl semaphore
5728 *      and expands the device name if you passed a format string to
5729 *      alloc_netdev.
5730 */
5731int register_netdev(struct net_device *dev)
5732{
5733        int err;
5734
5735        rtnl_lock();
5736        err = register_netdevice(dev);
5737        rtnl_unlock();
5738        return err;
5739}
5740EXPORT_SYMBOL(register_netdev);
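/*
 *	Example (illustrative sketch, not part of this file): the usual shape of
 *	an Ethernet driver probe routine around register_netdev(). The example_*
 *	names are hypothetical:
 *
 *		static int example_probe(void)
 *		{
 *			struct net_device *dev;
 *			int err;
 *
 *			dev = alloc_etherdev(sizeof(struct example_priv));
 *			if (!dev)
 *				return -ENOMEM;
 *
 *			dev->netdev_ops = &example_netdev_ops;
 *			random_ether_addr(dev->dev_addr);
 *
 *			err = register_netdev(dev);	/* takes/releases the RTNL */
 *			if (err)
 *				free_netdev(dev);
 *			return err;
 *		}
 */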
5741
5742int netdev_refcnt_read(const struct net_device *dev)
5743{
5744        int i, refcnt = 0;
5745
5746        for_each_possible_cpu(i)
5747                refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5748        return refcnt;
5749}
5750EXPORT_SYMBOL(netdev_refcnt_read);
5751
5752/**
5753 * netdev_wait_allrefs - wait until all references are gone.
5754 * @dev: target net_device
5755 *
5756 * This is called when unregistering network devices.
5757 *
5758 * Any protocol or device that holds a reference should register
5759 * for netdevice notification, and clean up and put back the
5760 * reference if they receive an UNREGISTER event.
5761 * We can get stuck here if buggy protocols don't correctly
5762 * call dev_put.
5763 */
5764static void netdev_wait_allrefs(struct net_device *dev)
5765{
5766        unsigned long rebroadcast_time, warning_time;
5767        int refcnt;
5768
5769        linkwatch_forget_dev(dev);
5770
5771        rebroadcast_time = warning_time = jiffies;
5772        refcnt = netdev_refcnt_read(dev);
5773
5774        while (refcnt != 0) {
5775                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5776                        rtnl_lock();
5777
5778                        /* Rebroadcast unregister notification */
5779                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5780                        /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5781                         * should have already handled it the first time */
5782
5783                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5784                                     &dev->state)) {
5785                                /* We must not have linkwatch events
5786                                 * pending on unregister. If this
5787                                 * happens, we simply run the queue
5788                                 * unscheduled, resulting in a noop
5789                                 * for this device.
5790                                 */
5791                                linkwatch_run_queue();
5792                        }
5793
5794                        __rtnl_unlock();
5795
5796                        rebroadcast_time = jiffies;
5797                }
5798
5799                msleep(250);
5800
5801                refcnt = netdev_refcnt_read(dev);
5802
5803                if (time_after(jiffies, warning_time + 10 * HZ)) {
5804                        pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5805                                 dev->name, refcnt);
5806                        warning_time = jiffies;
5807                }
5808        }
5809}
5810
5811/* The sequence is:
5812 *
5813 *      rtnl_lock();
5814 *      ...
5815 *      register_netdevice(x1);
5816 *      register_netdevice(x2);
5817 *      ...
5818 *      unregister_netdevice(y1);
5819 *      unregister_netdevice(y2);
5820 *      ...
5821 *      rtnl_unlock();
5822 *      free_netdev(y1);
5823 *      free_netdev(y2);
5824 *
5825 * We are invoked by rtnl_unlock().
5826 * This allows us to deal with problems:
5827 * 1) We can delete sysfs objects which invoke hotplug
5828 *    without deadlocking with linkwatch via keventd.
5829 * 2) Since we run with the RTNL semaphore not held, we can sleep
5830 *    safely in order to wait for the netdev refcnt to drop to zero.
5831 *
5832 * We must not return until all unregister events added during
5833 * the interval the lock was held have been completed.
5834 */
5835void netdev_run_todo(void)
5836{
5837        struct list_head list;
5838
5839        /* Snapshot list, allow later requests */
5840        list_replace_init(&net_todo_list, &list);
5841
5842        __rtnl_unlock();
5843
5844        /* Wait for rcu callbacks to finish before attempting to drain
5845         * the device list.  This usually avoids a 250ms wait.
5846         */
5847        if (!list_empty(&list))
5848                rcu_barrier();
5849
5850        while (!list_empty(&list)) {
5851                struct net_device *dev
5852                        = list_first_entry(&list, struct net_device, todo_list);
5853                list_del(&dev->todo_list);
5854
5855                if (unlikely(