linux/net/core/dev.c
   1/*
   2 *      NET3    Protocol independent device support routines.
   3 *
   4 *              This program is free software; you can redistribute it and/or
   5 *              modify it under the terms of the GNU General Public License
   6 *              as published by the Free Software Foundation; either version
   7 *              2 of the License, or (at your option) any later version.
   8 *
   9 *      Derived from the non IP parts of dev.c 1.0.19
  10 *              Authors:        Ross Biro
  11 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
  13 *
  14 *      Additional Authors:
  15 *              Florian la Roche <rzsfl@rz.uni-sb.de>
  16 *              Alan Cox <gw4pts@gw4pts.ampr.org>
  17 *              David Hinds <dahinds@users.sourceforge.net>
  18 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  19 *              Adam Sulmicki <adam@cfar.umd.edu>
  20 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
  21 *
  22 *      Changes:
  23 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
  24 *                                      to 2 if register_netdev gets called
  25 *                                      before net_dev_init & also removed a
  26 *                                      few lines of code in the process.
  27 *              Alan Cox        :       device private ioctl copies fields back.
  28 *              Alan Cox        :       Transmit queue code does relevant
  29 *                                      stunts to keep the queue safe.
  30 *              Alan Cox        :       Fixed double lock.
  31 *              Alan Cox        :       Fixed promisc NULL pointer trap
  32 *              ????????        :       Support the full private ioctl range
  33 *              Alan Cox        :       Moved ioctl permission check into
  34 *                                      drivers
  35 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
  36 *              Alan Cox        :       100 backlog just doesn't cut it when
  37 *                                      you start doing multicast video 8)
  38 *              Alan Cox        :       Rewrote net_bh and list manager.
  39 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
  40 *              Alan Cox        :       Took out transmit every packet pass
  41 *                                      Saved a few bytes in the ioctl handler
  42 *              Alan Cox        :       Network driver sets packet type before
  43 *                                      calling netif_rx. Saves a function
  44 *                                      call a packet.
  45 *              Alan Cox        :       Hashed net_bh()
  46 *              Richard Kooijman:       Timestamp fixes.
  47 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
  48 *              Alan Cox        :       Device lock protection.
  49 *              Alan Cox        :       Fixed nasty side effect of device close
  50 *                                      changes.
  51 *              Rudi Cilibrasi  :       Pass the right thing to
  52 *                                      set_mac_address()
  53 *              Dave Miller     :       32bit quantity for the device lock to
  54 *                                      make it work out on a Sparc.
  55 *              Bjorn Ekwall    :       Added KERNELD hack.
  56 *              Alan Cox        :       Cleaned up the backlog initialise.
  57 *              Craig Metz      :       SIOCGIFCONF fix if space for under
  58 *                                      1 device.
  59 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
  60 *                                      is no device open function.
  61 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
  62 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
  63 *              Cyrus Durgin    :       Cleaned for KMOD
  64 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
  65 *                                      A network device unload needs to purge
  66 *                                      the backlog queue.
  67 *      Paul Rusty Russell      :       SIOCSIFNAME
  68 *              Pekka Riikonen  :       Netdev boot-time settings code
  69 *              Andrew Morton   :       Make unregister_netdevice wait
  70 *                                      indefinitely on dev->refcnt
  71 *              J Hadi Salim    :       - Backlog queue sampling
  72 *                                      - netif_rx() feedback
  73 */
  74
  75#include <asm/uaccess.h>
  76#include <linux/bitops.h>
  77#include <linux/capability.h>
  78#include <linux/cpu.h>
  79#include <linux/types.h>
  80#include <linux/kernel.h>
  81#include <linux/hash.h>
  82#include <linux/slab.h>
  83#include <linux/sched.h>
  84#include <linux/mutex.h>
  85#include <linux/string.h>
  86#include <linux/mm.h>
  87#include <linux/socket.h>
  88#include <linux/sockios.h>
  89#include <linux/errno.h>
  90#include <linux/interrupt.h>
  91#include <linux/if_ether.h>
  92#include <linux/netdevice.h>
  93#include <linux/etherdevice.h>
  94#include <linux/ethtool.h>
  95#include <linux/notifier.h>
  96#include <linux/skbuff.h>
  97#include <net/net_namespace.h>
  98#include <net/sock.h>
  99#include <linux/rtnetlink.h>
 100#include <linux/proc_fs.h>
 101#include <linux/seq_file.h>
 102#include <linux/stat.h>
 103#include <net/dst.h>
 104#include <net/pkt_sched.h>
 105#include <net/checksum.h>
 106#include <net/xfrm.h>
 107#include <linux/highmem.h>
 108#include <linux/init.h>
 109#include <linux/kmod.h>
 110#include <linux/module.h>
 111#include <linux/netpoll.h>
 112#include <linux/rcupdate.h>
 113#include <linux/delay.h>
 114#include <net/wext.h>
 115#include <net/iw_handler.h>
 116#include <asm/current.h>
 117#include <linux/audit.h>
 118#include <linux/dmaengine.h>
 119#include <linux/err.h>
 120#include <linux/ctype.h>
 121#include <linux/if_arp.h>
 122#include <linux/if_vlan.h>
 123#include <linux/ip.h>
 124#include <net/ip.h>
 125#include <linux/ipv6.h>
 126#include <linux/in.h>
 127#include <linux/jhash.h>
 128#include <linux/random.h>
 129#include <trace/events/napi.h>
 130#include <trace/events/net.h>
 131#include <trace/events/skb.h>
 132#include <linux/pci.h>
 133#include <linux/inetdevice.h>
 134#include <linux/cpu_rmap.h>
 135#include <linux/net_tstamp.h>
 136#include <linux/static_key.h>
 137#include <net/flow_keys.h>
 138
 139#include "net-sysfs.h"
 140
 141/* Instead of increasing this, you should create a hash table. */
 142#define MAX_GRO_SKBS 8
 143
 144/* This should be increased if a protocol with a bigger head is added. */
 145#define GRO_MAX_HEAD (MAX_HEADER + 128)
 146
 147/*
 148 *      The list of packet types we will receive (as opposed to discard)
 149 *      and the routines to invoke.
 150 *
  151 *      Why 16?  Because with 16 the only overlap we get on a hash of the
 152 *      low nibble of the protocol value is RARP/SNAP/X.25.
 153 *
 154 *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
 155 *             sure which should go first, but I bet it won't make much
 156 *             difference if we are running VLANs.  The good news is that
 157 *             this protocol won't be in the list unless compiled in, so
 158 *             the average user (w/out VLANs) will not be adversely affected.
 159 *             --BLG
 160 *
 161 *              0800    IP
 162 *              8100    802.1Q VLAN
 163 *              0001    802.3
 164 *              0002    AX.25
 165 *              0004    802.2
 166 *              8035    RARP
 167 *              0005    SNAP
 168 *              0805    X.25
 169 *              0806    ARP
 170 *              8137    IPX
 171 *              0009    Localtalk
 172 *              86DD    IPv6
 173 */
 174
 175#define PTYPE_HASH_SIZE (16)
 176#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
 177
 178static DEFINE_SPINLOCK(ptype_lock);
 179static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 180static struct list_head ptype_all __read_mostly;        /* Taps */
 181
 182/*
 183 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 184 * semaphore.
 185 *
 186 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 187 *
 188 * Writers must hold the rtnl semaphore while they loop through the
 189 * dev_base_head list, and hold dev_base_lock for writing when they do the
 190 * actual updates.  This allows pure readers to access the list even
 191 * while a writer is preparing to update it.
 192 *
 193 * To put it another way, dev_base_lock is held for writing only to
 194 * protect against pure readers; the rtnl semaphore provides the
 195 * protection against other writers.
 196 *
 197 * For example usage, see register_netdevice() and
 198 * unregister_netdevice(), which must be called with the rtnl
 199 * semaphore held.
 200 */
 201DEFINE_RWLOCK(dev_base_lock);
 202EXPORT_SYMBOL(dev_base_lock);
 203
 204static inline void dev_base_seq_inc(struct net *net)
 205{
 206        while (++net->dev_base_seq == 0);
 207}
 208
 209static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 210{
 211        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 212
 213        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
 214}
 215
 216static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 217{
 218        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 219}
 220
 221static inline void rps_lock(struct softnet_data *sd)
 222{
 223#ifdef CONFIG_RPS
 224        spin_lock(&sd->input_pkt_queue.lock);
 225#endif
 226}
 227
 228static inline void rps_unlock(struct softnet_data *sd)
 229{
 230#ifdef CONFIG_RPS
 231        spin_unlock(&sd->input_pkt_queue.lock);
 232#endif
 233}
 234
 235/* Device list insertion */
 236static int list_netdevice(struct net_device *dev)
 237{
 238        struct net *net = dev_net(dev);
 239
 240        ASSERT_RTNL();
 241
 242        write_lock_bh(&dev_base_lock);
 243        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
 244        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 245        hlist_add_head_rcu(&dev->index_hlist,
 246                           dev_index_hash(net, dev->ifindex));
 247        write_unlock_bh(&dev_base_lock);
 248
 249        dev_base_seq_inc(net);
 250
 251        return 0;
 252}
 253
 254/* Device list removal
 255 * caller must respect an RCU grace period before freeing/reusing dev
 256 */
 257static void unlist_netdevice(struct net_device *dev)
 258{
 259        ASSERT_RTNL();
 260
 261        /* Unlink dev from the device chain */
 262        write_lock_bh(&dev_base_lock);
 263        list_del_rcu(&dev->dev_list);
 264        hlist_del_rcu(&dev->name_hlist);
 265        hlist_del_rcu(&dev->index_hlist);
 266        write_unlock_bh(&dev_base_lock);
 267
 268        dev_base_seq_inc(dev_net(dev));
 269}
 270
 271/*
 272 *      Our notifier list
 273 */
 274
 275static RAW_NOTIFIER_HEAD(netdev_chain);
 276
 277/*
 278 *      Device drivers call our routines to queue packets here. We empty the
 279 *      queue in the local softnet handler.
 280 */
 281
 282DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 283EXPORT_PER_CPU_SYMBOL(softnet_data);
 284
 285#ifdef CONFIG_LOCKDEP
 286/*
 287 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 288 * according to dev->type
 289 */
 290static const unsigned short netdev_lock_type[] =
 291        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
 292         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
 293         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
 294         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
 295         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
 296         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
 297         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
 298         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
 299         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
 300         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
 301         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
 302         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
 303         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
 304         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
 305         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
 306
 307static const char *const netdev_lock_name[] =
 308        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
 309         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
 310         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
 311         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
 312         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
 313         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
 314         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
 315         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
 316         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
 317         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
 318         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
 319         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
 320         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
 321         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
 322         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
 323
 324static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 325static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 326
 327static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 328{
 329        int i;
 330
 331        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
 332                if (netdev_lock_type[i] == dev_type)
 333                        return i;
 334        /* the last key is used by default */
 335        return ARRAY_SIZE(netdev_lock_type) - 1;
 336}
 337
 338static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 339                                                 unsigned short dev_type)
 340{
 341        int i;
 342
 343        i = netdev_lock_pos(dev_type);
 344        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 345                                   netdev_lock_name[i]);
 346}
 347
 348static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 349{
 350        int i;
 351
 352        i = netdev_lock_pos(dev->type);
 353        lockdep_set_class_and_name(&dev->addr_list_lock,
 354                                   &netdev_addr_lock_key[i],
 355                                   netdev_lock_name[i]);
 356}
 357#else
 358static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 359                                                 unsigned short dev_type)
 360{
 361}
 362static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 363{
 364}
 365#endif
 366
 367/*******************************************************************************
 368
 369                Protocol management and registration routines
 370
 371*******************************************************************************/
 372
 373/*
 374 *      Add a protocol ID to the list. Now that the input handler is
 375 *      smarter we can dispense with all the messy stuff that used to be
 376 *      here.
 377 *
  378 *      BEWARE!!! Protocol handlers that mangle input packets
  379 *      MUST BE last in the hash buckets, and checking protocol handlers
  380 *      MUST start from the promiscuous ptype_all chain in net_bh.
  381 *      This is true now; do not change it.
  382 *      Explanation: if a packet-mangling protocol handler were the
  383 *      first on the list, it could not sense that the packet is
  384 *      cloned and should be copied-on-write, so it would change the
  385 *      clone and subsequent readers would get a broken packet.
  386 *                                                      --ANK (980803)
 387 */
 388
 389static inline struct list_head *ptype_head(const struct packet_type *pt)
 390{
 391        if (pt->type == htons(ETH_P_ALL))
 392                return &ptype_all;
 393        else
 394                return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 395}
 396
 397/**
 398 *      dev_add_pack - add packet handler
 399 *      @pt: packet type declaration
 400 *
 401 *      Add a protocol handler to the networking stack. The passed &packet_type
 402 *      is linked into kernel lists and may not be freed until it has been
 403 *      removed from the kernel lists.
 404 *
  405 *      This call does not sleep, therefore it cannot
  406 *      guarantee that all CPUs that are in the middle of receiving packets
  407 *      will see the new packet type (until the next received packet).
 408 */
 409
 410void dev_add_pack(struct packet_type *pt)
 411{
 412        struct list_head *head = ptype_head(pt);
 413
 414        spin_lock(&ptype_lock);
 415        list_add_rcu(&pt->list, head);
 416        spin_unlock(&ptype_lock);
 417}
 418EXPORT_SYMBOL(dev_add_pack);
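
/*
 * Illustrative sketch, not part of the original file: this is roughly how a
 * protocol module registers a receive handler with dev_add_pack().  The
 * ethertype, handler and variable names below are hypothetical.
 */
static int example_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        /* a real handler would parse the frame here */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pkt_type __maybe_unused = {
        .type = cpu_to_be16(0x88b5),    /* local experimental ethertype */
        .func = example_pkt_rcv,
};

/* module init:  dev_add_pack(&example_pkt_type);
 * module exit:  dev_remove_pack(&example_pkt_type);
 */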
 419
 420/**
 421 *      __dev_remove_pack        - remove packet handler
 422 *      @pt: packet type declaration
 423 *
 424 *      Remove a protocol handler that was previously added to the kernel
 425 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 426 *      from the kernel lists and can be freed or reused once this function
 427 *      returns.
 428 *
 429 *      The packet type might still be in use by receivers
  430 *      and must not be freed until after all the CPUs have gone
 431 *      through a quiescent state.
 432 */
 433void __dev_remove_pack(struct packet_type *pt)
 434{
 435        struct list_head *head = ptype_head(pt);
 436        struct packet_type *pt1;
 437
 438        spin_lock(&ptype_lock);
 439
 440        list_for_each_entry(pt1, head, list) {
 441                if (pt == pt1) {
 442                        list_del_rcu(&pt->list);
 443                        goto out;
 444                }
 445        }
 446
 447        pr_warn("dev_remove_pack: %p not found\n", pt);
 448out:
 449        spin_unlock(&ptype_lock);
 450}
 451EXPORT_SYMBOL(__dev_remove_pack);
 452
 453/**
 454 *      dev_remove_pack  - remove packet handler
 455 *      @pt: packet type declaration
 456 *
 457 *      Remove a protocol handler that was previously added to the kernel
 458 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 459 *      from the kernel lists and can be freed or reused once this function
 460 *      returns.
 461 *
 462 *      This call sleeps to guarantee that no CPU is looking at the packet
 463 *      type after return.
 464 */
 465void dev_remove_pack(struct packet_type *pt)
 466{
 467        __dev_remove_pack(pt);
 468
 469        synchronize_net();
 470}
 471EXPORT_SYMBOL(dev_remove_pack);
 472
 473/******************************************************************************
 474
 475                      Device Boot-time Settings Routines
 476
 477*******************************************************************************/
 478
 479/* Boot time configuration table */
 480static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
 481
 482/**
 483 *      netdev_boot_setup_add   - add new setup entry
 484 *      @name: name of the device
 485 *      @map: configured settings for the device
 486 *
  487 *      Adds a new setup entry to the dev_boot_setup list.  The function
  488 *      returns 0 on error and 1 on success.  This is a generic routine
  489 *      for all netdevices.
 490 */
 491static int netdev_boot_setup_add(char *name, struct ifmap *map)
 492{
 493        struct netdev_boot_setup *s;
 494        int i;
 495
 496        s = dev_boot_setup;
 497        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 498                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
 499                        memset(s[i].name, 0, sizeof(s[i].name));
 500                        strlcpy(s[i].name, name, IFNAMSIZ);
 501                        memcpy(&s[i].map, map, sizeof(s[i].map));
 502                        break;
 503                }
 504        }
 505
 506        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
 507}
 508
 509/**
 510 *      netdev_boot_setup_check - check boot time settings
 511 *      @dev: the netdevice
 512 *
  513 *      Check the boot time settings for the device.
  514 *      Any settings found are applied to the device so that they can
  515 *      be used later during device probing.
  516 *      Returns 0 if no settings were found, 1 if they were.
 517 */
 518int netdev_boot_setup_check(struct net_device *dev)
 519{
 520        struct netdev_boot_setup *s = dev_boot_setup;
 521        int i;
 522
 523        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 524                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
 525                    !strcmp(dev->name, s[i].name)) {
 526                        dev->irq        = s[i].map.irq;
 527                        dev->base_addr  = s[i].map.base_addr;
 528                        dev->mem_start  = s[i].map.mem_start;
 529                        dev->mem_end    = s[i].map.mem_end;
 530                        return 1;
 531                }
 532        }
 533        return 0;
 534}
 535EXPORT_SYMBOL(netdev_boot_setup_check);
 536
 537
 538/**
 539 *      netdev_boot_base        - get address from boot time settings
 540 *      @prefix: prefix for network device
 541 *      @unit: id for network device
 542 *
  543 *      Check the boot time settings for the base address of the device.
  544 *      Any settings found are applied to the device so that they can
  545 *      be used later during device probing.
  546 *      Returns 0 if no settings are found.
 547 */
 548unsigned long netdev_boot_base(const char *prefix, int unit)
 549{
 550        const struct netdev_boot_setup *s = dev_boot_setup;
 551        char name[IFNAMSIZ];
 552        int i;
 553
 554        sprintf(name, "%s%d", prefix, unit);
 555
 556        /*
 557         * If the device is already registered then return a base of 1
 558         * to indicate that this interface should not be probed
 559         */
 560        if (__dev_get_by_name(&init_net, name))
 561                return 1;
 562
 563        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
 564                if (!strcmp(name, s[i].name))
 565                        return s[i].map.base_addr;
 566        return 0;
 567}
 568
 569/*
  570 * Saves boot-time configured settings for any netdevice.
 571 */
 572int __init netdev_boot_setup(char *str)
 573{
 574        int ints[5];
 575        struct ifmap map;
 576
 577        str = get_options(str, ARRAY_SIZE(ints), ints);
 578        if (!str || !*str)
 579                return 0;
 580
 581        /* Save settings */
 582        memset(&map, 0, sizeof(map));
 583        if (ints[0] > 0)
 584                map.irq = ints[1];
 585        if (ints[0] > 1)
 586                map.base_addr = ints[2];
 587        if (ints[0] > 2)
 588                map.mem_start = ints[3];
 589        if (ints[0] > 3)
 590                map.mem_end = ints[4];
 591
 592        /* Add new entry to the list */
 593        return netdev_boot_setup_add(str, &map);
 594}
 595
 596__setup("netdev=", netdev_boot_setup);
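
/*
 * Illustrative note, not part of the original file: given the parsing above,
 * a kernel command line entry has the form
 *
 *      netdev=<irq>,<io_base>,<mem_start>,<mem_end>,<name>
 *
 * e.g. "netdev=5,0x340,0,0,eth1" records irq 5 and I/O base 0x340 for the
 * device that will later be probed as "eth1".
 */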
 597
 598/*******************************************************************************
 599
 600                            Device Interface Subroutines
 601
 602*******************************************************************************/
 603
 604/**
 605 *      __dev_get_by_name       - find a device by its name
 606 *      @net: the applicable net namespace
 607 *      @name: name to find
 608 *
  609 *      Find an interface by name. Must be called under the RTNL semaphore
  610 *      or @dev_base_lock. If the name is found, a pointer to the device
 611 *      is returned. If the name is not found then %NULL is returned. The
 612 *      reference counters are not incremented so the caller must be
 613 *      careful with locks.
 614 */
 615
 616struct net_device *__dev_get_by_name(struct net *net, const char *name)
 617{
 618        struct hlist_node *p;
 619        struct net_device *dev;
 620        struct hlist_head *head = dev_name_hash(net, name);
 621
 622        hlist_for_each_entry(dev, p, head, name_hlist)
 623                if (!strncmp(dev->name, name, IFNAMSIZ))
 624                        return dev;
 625
 626        return NULL;
 627}
 628EXPORT_SYMBOL(__dev_get_by_name);
 629
 630/**
 631 *      dev_get_by_name_rcu     - find a device by its name
 632 *      @net: the applicable net namespace
 633 *      @name: name to find
 634 *
 635 *      Find an interface by name.
 636 *      If the name is found a pointer to the device is returned.
 637 *      If the name is not found then %NULL is returned.
 638 *      The reference counters are not incremented so the caller must be
 639 *      careful with locks. The caller must hold RCU lock.
 640 */
 641
 642struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
 643{
 644        struct hlist_node *p;
 645        struct net_device *dev;
 646        struct hlist_head *head = dev_name_hash(net, name);
 647
 648        hlist_for_each_entry_rcu(dev, p, head, name_hlist)
 649                if (!strncmp(dev->name, name, IFNAMSIZ))
 650                        return dev;
 651
 652        return NULL;
 653}
 654EXPORT_SYMBOL(dev_get_by_name_rcu);
 655
 656/**
 657 *      dev_get_by_name         - find a device by its name
 658 *      @net: the applicable net namespace
 659 *      @name: name to find
 660 *
 661 *      Find an interface by name. This can be called from any
 662 *      context and does its own locking. The returned handle has
 663 *      the usage count incremented and the caller must use dev_put() to
 664 *      release it when it is no longer needed. %NULL is returned if no
 665 *      matching device is found.
 666 */
 667
 668struct net_device *dev_get_by_name(struct net *net, const char *name)
 669{
 670        struct net_device *dev;
 671
 672        rcu_read_lock();
 673        dev = dev_get_by_name_rcu(net, name);
 674        if (dev)
 675                dev_hold(dev);
 676        rcu_read_unlock();
 677        return dev;
 678}
 679EXPORT_SYMBOL(dev_get_by_name);
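
/*
 * Illustrative sketch, not part of the original file: dev_get_by_name()
 * returns the device with its reference count raised, so every successful
 * lookup must be balanced by dev_put().  The function and interface names
 * below are hypothetical.
 */
static void example_report_mtu(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return;
        pr_info("%s: mtu %u\n", dev->name, dev->mtu);
        dev_put(dev);
}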
 680
 681/**
 682 *      __dev_get_by_index - find a device by its ifindex
 683 *      @net: the applicable net namespace
 684 *      @ifindex: index of device
 685 *
  686 *      Search for an interface by index. Returns a pointer to the device,
  687 *      or %NULL if it is not found. The device has not
 688 *      had its reference counter increased so the caller must be careful
 689 *      about locking. The caller must hold either the RTNL semaphore
 690 *      or @dev_base_lock.
 691 */
 692
 693struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 694{
 695        struct hlist_node *p;
 696        struct net_device *dev;
 697        struct hlist_head *head = dev_index_hash(net, ifindex);
 698
 699        hlist_for_each_entry(dev, p, head, index_hlist)
 700                if (dev->ifindex == ifindex)
 701                        return dev;
 702
 703        return NULL;
 704}
 705EXPORT_SYMBOL(__dev_get_by_index);
 706
 707/**
 708 *      dev_get_by_index_rcu - find a device by its ifindex
 709 *      @net: the applicable net namespace
 710 *      @ifindex: index of device
 711 *
  712 *      Search for an interface by index. Returns a pointer to the device,
  713 *      or %NULL if it is not found. The device has not
 714 *      had its reference counter increased so the caller must be careful
 715 *      about locking. The caller must hold RCU lock.
 716 */
 717
 718struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
 719{
 720        struct hlist_node *p;
 721        struct net_device *dev;
 722        struct hlist_head *head = dev_index_hash(net, ifindex);
 723
 724        hlist_for_each_entry_rcu(dev, p, head, index_hlist)
 725                if (dev->ifindex == ifindex)
 726                        return dev;
 727
 728        return NULL;
 729}
 730EXPORT_SYMBOL(dev_get_by_index_rcu);
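
/*
 * Illustrative sketch, not part of the original file: the _rcu lookups take
 * no reference, so the result may only be used inside the rcu_read_lock()
 * section unless dev_hold() is called first (as dev_get_by_index() below
 * does).  The helper name is hypothetical.
 */
static int example_ifindex_to_name(struct net *net, int ifindex, char *name)
{
        struct net_device *dev;
        int err = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev) {
                strlcpy(name, dev->name, IFNAMSIZ);
                err = 0;
        }
        rcu_read_unlock();
        return err;
}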
 731
 732
 733/**
 734 *      dev_get_by_index - find a device by its ifindex
 735 *      @net: the applicable net namespace
 736 *      @ifindex: index of device
 737 *
  738 *      Search for an interface by index. Returns a pointer to the device,
  739 *      or NULL if it is not found. The device returned has
 740 *      had a reference added and the pointer is safe until the user calls
 741 *      dev_put to indicate they have finished with it.
 742 */
 743
 744struct net_device *dev_get_by_index(struct net *net, int ifindex)
 745{
 746        struct net_device *dev;
 747
 748        rcu_read_lock();
 749        dev = dev_get_by_index_rcu(net, ifindex);
 750        if (dev)
 751                dev_hold(dev);
 752        rcu_read_unlock();
 753        return dev;
 754}
 755EXPORT_SYMBOL(dev_get_by_index);
 756
 757/**
 758 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 759 *      @net: the applicable net namespace
 760 *      @type: media type of device
 761 *      @ha: hardware address
 762 *
  763 *      Search for an interface by MAC address. Returns a pointer to the
  764 *      device, or NULL if it is not found.
 765 *      The caller must hold RCU or RTNL.
 766 *      The returned device has not had its ref count increased
 767 *      and the caller must therefore be careful about locking
 768 *
 769 */
 770
 771struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 772                                       const char *ha)
 773{
 774        struct net_device *dev;
 775
 776        for_each_netdev_rcu(net, dev)
 777                if (dev->type == type &&
 778                    !memcmp(dev->dev_addr, ha, dev->addr_len))
 779                        return dev;
 780
 781        return NULL;
 782}
 783EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
 784
 785struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
 786{
 787        struct net_device *dev;
 788
 789        ASSERT_RTNL();
 790        for_each_netdev(net, dev)
 791                if (dev->type == type)
 792                        return dev;
 793
 794        return NULL;
 795}
 796EXPORT_SYMBOL(__dev_getfirstbyhwtype);
 797
 798struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
 799{
 800        struct net_device *dev, *ret = NULL;
 801
 802        rcu_read_lock();
 803        for_each_netdev_rcu(net, dev)
 804                if (dev->type == type) {
 805                        dev_hold(dev);
 806                        ret = dev;
 807                        break;
 808                }
 809        rcu_read_unlock();
 810        return ret;
 811}
 812EXPORT_SYMBOL(dev_getfirstbyhwtype);
 813
 814/**
 815 *      dev_get_by_flags_rcu - find any device with given flags
 816 *      @net: the applicable net namespace
 817 *      @if_flags: IFF_* values
 818 *      @mask: bitmask of bits in if_flags to check
 819 *
  820 *      Search for any interface with the given flags. Returns a pointer to
  821 *      the first matching device, or NULL if none is found. Must be called
  822 *      inside rcu_read_lock(), and the result's refcount is unchanged.
 823 */
 824
 825struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
 826                                    unsigned short mask)
 827{
 828        struct net_device *dev, *ret;
 829
 830        ret = NULL;
 831        for_each_netdev_rcu(net, dev) {
 832                if (((dev->flags ^ if_flags) & mask) == 0) {
 833                        ret = dev;
 834                        break;
 835                }
 836        }
 837        return ret;
 838}
 839EXPORT_SYMBOL(dev_get_by_flags_rcu);
 840
 841/**
 842 *      dev_valid_name - check if name is okay for network device
 843 *      @name: name string
 844 *
 845 *      Network device names need to be valid file names to
  846 *      allow sysfs to work.  We also disallow any kind of
 847 *      whitespace.
 848 */
 849bool dev_valid_name(const char *name)
 850{
 851        if (*name == '\0')
 852                return false;
 853        if (strlen(name) >= IFNAMSIZ)
 854                return false;
 855        if (!strcmp(name, ".") || !strcmp(name, ".."))
 856                return false;
 857
 858        while (*name) {
 859                if (*name == '/' || isspace(*name))
 860                        return false;
 861                name++;
 862        }
 863        return true;
 864}
 865EXPORT_SYMBOL(dev_valid_name);
 866
 867/**
 868 *      __dev_alloc_name - allocate a name for a device
 869 *      @net: network namespace to allocate the device name in
 870 *      @name: name format string
 871 *      @buf:  scratch buffer and result name string
 872 *
  873 *      Passed a format string - eg "lt%d" - it will try to find a suitable
 874 *      id. It scans list of devices to build up a free map, then chooses
 875 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 876 *      while allocating the name and adding the device in order to avoid
 877 *      duplicates.
 878 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 879 *      Returns the number of the unit assigned or a negative errno code.
 880 */
 881
 882static int __dev_alloc_name(struct net *net, const char *name, char *buf)
 883{
 884        int i = 0;
 885        const char *p;
 886        const int max_netdevices = 8*PAGE_SIZE;
 887        unsigned long *inuse;
 888        struct net_device *d;
 889
 890        p = strnchr(name, IFNAMSIZ-1, '%');
 891        if (p) {
 892                /*
 893                 * Verify the string as this thing may have come from
 894                 * the user.  There must be either one "%d" and no other "%"
 895                 * characters.
 896                 */
 897                if (p[1] != 'd' || strchr(p + 2, '%'))
 898                        return -EINVAL;
 899
 900                /* Use one page as a bit array of possible slots */
 901                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
 902                if (!inuse)
 903                        return -ENOMEM;
 904
 905                for_each_netdev(net, d) {
 906                        if (!sscanf(d->name, name, &i))
 907                                continue;
 908                        if (i < 0 || i >= max_netdevices)
 909                                continue;
 910
 911                        /*  avoid cases where sscanf is not exact inverse of printf */
 912                        snprintf(buf, IFNAMSIZ, name, i);
 913                        if (!strncmp(buf, d->name, IFNAMSIZ))
 914                                set_bit(i, inuse);
 915                }
 916
 917                i = find_first_zero_bit(inuse, max_netdevices);
 918                free_page((unsigned long) inuse);
 919        }
 920
 921        if (buf != name)
 922                snprintf(buf, IFNAMSIZ, name, i);
 923        if (!__dev_get_by_name(net, buf))
 924                return i;
 925
 926        /* It is possible to run out of possible slots
 927         * when the name is long and there isn't enough space left
 928         * for the digits, or if all bits are used.
 929         */
 930        return -ENFILE;
 931}
 932
 933/**
 934 *      dev_alloc_name - allocate a name for a device
 935 *      @dev: device
 936 *      @name: name format string
 937 *
  938 *      Passed a format string - eg "lt%d" - it will try to find a suitable
 939 *      id. It scans list of devices to build up a free map, then chooses
 940 *      the first empty slot. The caller must hold the dev_base or rtnl lock
 941 *      while allocating the name and adding the device in order to avoid
 942 *      duplicates.
 943 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 944 *      Returns the number of the unit assigned or a negative errno code.
 945 */
 946
 947int dev_alloc_name(struct net_device *dev, const char *name)
 948{
 949        char buf[IFNAMSIZ];
 950        struct net *net;
 951        int ret;
 952
 953        BUG_ON(!dev_net(dev));
 954        net = dev_net(dev);
 955        ret = __dev_alloc_name(net, name, buf);
 956        if (ret >= 0)
 957                strlcpy(dev->name, buf, IFNAMSIZ);
 958        return ret;
 959}
 960EXPORT_SYMBOL(dev_alloc_name);
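
/*
 * Illustrative sketch, not part of the original file: a caller that does not
 * care about the unit number passes a template such as "eth%d" and the first
 * free unit is filled in ("eth0", then "eth1", ...).  RTNL must be held, as
 * documented above.  The helper name is hypothetical.
 */
static int example_assign_name(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "eth%d");

        if (unit < 0)
                return unit;    /* -EINVAL, -ENFILE or -ENOMEM */
        /* dev->name now holds e.g. "eth0"; unit is the number chosen */
        return 0;
}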
 961
 962static int dev_get_valid_name(struct net_device *dev, const char *name)
 963{
 964        struct net *net;
 965
 966        BUG_ON(!dev_net(dev));
 967        net = dev_net(dev);
 968
 969        if (!dev_valid_name(name))
 970                return -EINVAL;
 971
 972        if (strchr(name, '%'))
 973                return dev_alloc_name(dev, name);
 974        else if (__dev_get_by_name(net, name))
 975                return -EEXIST;
 976        else if (dev->name != name)
 977                strlcpy(dev->name, name, IFNAMSIZ);
 978
 979        return 0;
 980}
 981
 982/**
 983 *      dev_change_name - change name of a device
 984 *      @dev: device
 985 *      @newname: name (or format string) must be at least IFNAMSIZ
 986 *
  987 *      Change the name of a device; format strings such as "eth%d"
  988 *      can be passed for wildcarding.
 989 */
 990int dev_change_name(struct net_device *dev, const char *newname)
 991{
 992        char oldname[IFNAMSIZ];
 993        int err = 0;
 994        int ret;
 995        struct net *net;
 996
 997        ASSERT_RTNL();
 998        BUG_ON(!dev_net(dev));
 999
1000        net = dev_net(dev);
1001        if (dev->flags & IFF_UP)
1002                return -EBUSY;
1003
1004        if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1005                return 0;
1006
1007        memcpy(oldname, dev->name, IFNAMSIZ);
1008
1009        err = dev_get_valid_name(dev, newname);
1010        if (err < 0)
1011                return err;
1012
1013rollback:
1014        ret = device_rename(&dev->dev, dev->name);
1015        if (ret) {
1016                memcpy(dev->name, oldname, IFNAMSIZ);
1017                return ret;
1018        }
1019
1020        write_lock_bh(&dev_base_lock);
1021        hlist_del_rcu(&dev->name_hlist);
1022        write_unlock_bh(&dev_base_lock);
1023
1024        synchronize_rcu();
1025
1026        write_lock_bh(&dev_base_lock);
1027        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1028        write_unlock_bh(&dev_base_lock);
1029
1030        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1031        ret = notifier_to_errno(ret);
1032
1033        if (ret) {
1034                /* err >= 0 after dev_alloc_name() or stores the first errno */
1035                if (err >= 0) {
1036                        err = ret;
1037                        memcpy(dev->name, oldname, IFNAMSIZ);
1038                        goto rollback;
1039                } else {
1040                        pr_err("%s: name change rollback failed: %d\n",
1041                               dev->name, ret);
1042                }
1043        }
1044
1045        return err;
1046}
1047
1048/**
1049 *      dev_set_alias - change ifalias of a device
1050 *      @dev: device
1051 *      @alias: name up to IFALIASZ
1052 *      @len: limit of bytes to copy from info
1053 *
 1054 *      Set the ifalias for a device.
1055 */
1056int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1057{
1058        char *new_ifalias;
1059
1060        ASSERT_RTNL();
1061
1062        if (len >= IFALIASZ)
1063                return -EINVAL;
1064
1065        if (!len) {
1066                if (dev->ifalias) {
1067                        kfree(dev->ifalias);
1068                        dev->ifalias = NULL;
1069                }
1070                return 0;
1071        }
1072
1073        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1074        if (!new_ifalias)
1075                return -ENOMEM;
1076        dev->ifalias = new_ifalias;
1077
1078        strlcpy(dev->ifalias, alias, len+1);
1079        return len;
1080}
1081
1082
1083/**
1084 *      netdev_features_change - device changes features
1085 *      @dev: device to cause notification
1086 *
1087 *      Called to indicate a device has changed features.
1088 */
1089void netdev_features_change(struct net_device *dev)
1090{
1091        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1092}
1093EXPORT_SYMBOL(netdev_features_change);
1094
1095/**
1096 *      netdev_state_change - device changes state
1097 *      @dev: device to cause notification
1098 *
1099 *      Called to indicate a device has changed state. This function calls
1100 *      the notifier chains for netdev_chain and sends a NEWLINK message
1101 *      to the routing socket.
1102 */
1103void netdev_state_change(struct net_device *dev)
1104{
1105        if (dev->flags & IFF_UP) {
1106                call_netdevice_notifiers(NETDEV_CHANGE, dev);
1107                rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1108        }
1109}
1110EXPORT_SYMBOL(netdev_state_change);
1111
1112int netdev_bonding_change(struct net_device *dev, unsigned long event)
1113{
1114        return call_netdevice_notifiers(event, dev);
1115}
1116EXPORT_SYMBOL(netdev_bonding_change);
1117
1118/**
1119 *      dev_load        - load a network module
1120 *      @net: the applicable net namespace
1121 *      @name: name of interface
1122 *
 1123 *      If a network interface is not present and the process has suitable
 1124 *      privileges, this function loads the module. If module loading is not
1125 *      available in this kernel then it becomes a nop.
1126 */
1127
1128void dev_load(struct net *net, const char *name)
1129{
1130        struct net_device *dev;
1131        int no_module;
1132
1133        rcu_read_lock();
1134        dev = dev_get_by_name_rcu(net, name);
1135        rcu_read_unlock();
1136
1137        no_module = !dev;
1138        if (no_module && capable(CAP_NET_ADMIN))
1139                no_module = request_module("netdev-%s", name);
1140        if (no_module && capable(CAP_SYS_MODULE)) {
1141                if (!request_module("%s", name))
1142                        pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1143                                name);
1144        }
1145}
1146EXPORT_SYMBOL(dev_load);
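
/*
 * Illustrative note, not part of the original file: a driver that wants to be
 * auto-loaded through the preferred CAP_NET_ADMIN path above declares the
 * matching module alias, for example
 *
 *      MODULE_ALIAS_NETDEV("tun");     -- matches request_module("netdev-tun")
 */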
1147
1148static int __dev_open(struct net_device *dev)
1149{
1150        const struct net_device_ops *ops = dev->netdev_ops;
1151        int ret;
1152
1153        ASSERT_RTNL();
1154
1155        if (!netif_device_present(dev))
1156                return -ENODEV;
1157
1158        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1159        ret = notifier_to_errno(ret);
1160        if (ret)
1161                return ret;
1162
1163        set_bit(__LINK_STATE_START, &dev->state);
1164
1165        if (ops->ndo_validate_addr)
1166                ret = ops->ndo_validate_addr(dev);
1167
1168        if (!ret && ops->ndo_open)
1169                ret = ops->ndo_open(dev);
1170
1171        if (ret)
1172                clear_bit(__LINK_STATE_START, &dev->state);
1173        else {
1174                dev->flags |= IFF_UP;
1175                net_dmaengine_get();
1176                dev_set_rx_mode(dev);
1177                dev_activate(dev);
1178                add_device_randomness(dev->dev_addr, dev->addr_len);
1179        }
1180
1181        return ret;
1182}
1183
1184/**
1185 *      dev_open        - prepare an interface for use.
1186 *      @dev:   device to open
1187 *
1188 *      Takes a device from down to up state. The device's private open
1189 *      function is invoked and then the multicast lists are loaded. Finally
1190 *      the device is moved into the up state and a %NETDEV_UP message is
1191 *      sent to the netdev notifier chain.
1192 *
1193 *      Calling this function on an active interface is a nop. On a failure
1194 *      a negative errno code is returned.
1195 */
1196int dev_open(struct net_device *dev)
1197{
1198        int ret;
1199
1200        if (dev->flags & IFF_UP)
1201                return 0;
1202
1203        ret = __dev_open(dev);
1204        if (ret < 0)
1205                return ret;
1206
1207        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1208        call_netdevice_notifiers(NETDEV_UP, dev);
1209
1210        return ret;
1211}
1212EXPORT_SYMBOL(dev_open);
1213
1214static int __dev_close_many(struct list_head *head)
1215{
1216        struct net_device *dev;
1217
1218        ASSERT_RTNL();
1219        might_sleep();
1220
1221        list_for_each_entry(dev, head, unreg_list) {
1222                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1223
1224                clear_bit(__LINK_STATE_START, &dev->state);
1225
 1226                /* Synchronize to the scheduled poll. We cannot touch the poll
 1227                 * list; it can even be on a different cpu. So just clear netif_running().
1228                 *
 1229                 * dev->stop() will invoke napi_disable() on all of its
1230                 * napi_struct instances on this device.
1231                 */
1232                smp_mb__after_clear_bit(); /* Commit netif_running(). */
1233        }
1234
1235        dev_deactivate_many(head);
1236
1237        list_for_each_entry(dev, head, unreg_list) {
1238                const struct net_device_ops *ops = dev->netdev_ops;
1239
1240                /*
 1241                 *      Call the device specific close. This cannot fail.
 1242                 *      It is only called if the device is UP.
1243                 *
1244                 *      We allow it to be called even after a DETACH hot-plug
1245                 *      event.
1246                 */
1247                if (ops->ndo_stop)
1248                        ops->ndo_stop(dev);
1249
1250                dev->flags &= ~IFF_UP;
1251                net_dmaengine_put();
1252        }
1253
1254        return 0;
1255}
1256
1257static int __dev_close(struct net_device *dev)
1258{
1259        int retval;
1260        LIST_HEAD(single);
1261
1262        list_add(&dev->unreg_list, &single);
1263        retval = __dev_close_many(&single);
1264        list_del(&single);
1265        return retval;
1266}
1267
1268static int dev_close_many(struct list_head *head)
1269{
1270        struct net_device *dev, *tmp;
1271        LIST_HEAD(tmp_list);
1272
1273        list_for_each_entry_safe(dev, tmp, head, unreg_list)
1274                if (!(dev->flags & IFF_UP))
1275                        list_move(&dev->unreg_list, &tmp_list);
1276
1277        __dev_close_many(head);
1278
1279        list_for_each_entry(dev, head, unreg_list) {
1280                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1281                call_netdevice_notifiers(NETDEV_DOWN, dev);
1282        }
1283
1284        /* rollback_registered_many needs the complete original list */
1285        list_splice(&tmp_list, head);
1286        return 0;
1287}
1288
1289/**
1290 *      dev_close - shutdown an interface.
1291 *      @dev: device to shutdown
1292 *
1293 *      This function moves an active device into down state. A
1294 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1295 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1296 *      chain.
1297 */
1298int dev_close(struct net_device *dev)
1299{
1300        if (dev->flags & IFF_UP) {
1301                LIST_HEAD(single);
1302
1303                list_add(&dev->unreg_list, &single);
1304                dev_close_many(&single);
1305                list_del(&single);
1306        }
1307        return 0;
1308}
1309EXPORT_SYMBOL(dev_close);
1310
1311
1312/**
1313 *      dev_disable_lro - disable Large Receive Offload on a device
1314 *      @dev: device
1315 *
1316 *      Disable Large Receive Offload (LRO) on a net device.  Must be
1317 *      called under RTNL.  This is needed if received packets may be
1318 *      forwarded to another interface.
1319 */
1320void dev_disable_lro(struct net_device *dev)
1321{
1322        /*
1323         * If we're trying to disable lro on a vlan device
1324         * use the underlying physical device instead
1325         */
1326        if (is_vlan_dev(dev))
1327                dev = vlan_dev_real_dev(dev);
1328
1329        dev->wanted_features &= ~NETIF_F_LRO;
1330        netdev_update_features(dev);
1331
1332        if (unlikely(dev->features & NETIF_F_LRO))
1333                netdev_WARN(dev, "failed to disable LRO!\n");
1334}
1335EXPORT_SYMBOL(dev_disable_lro);
1336
1337
1338static int dev_boot_phase = 1;
1339
1340/**
1341 *      register_netdevice_notifier - register a network notifier block
1342 *      @nb: notifier
1343 *
1344 *      Register a notifier to be called when network device events occur.
1345 *      The notifier passed is linked into the kernel structures and must
1346 *      not be reused until it has been unregistered. A negative errno code
1347 *      is returned on a failure.
1348 *
 1349 *      When registered, all registration and up events are replayed
 1350 *      to the new notifier to give it a race-free view of the
 1351 *      network device list.
1352 */
1353
1354int register_netdevice_notifier(struct notifier_block *nb)
1355{
1356        struct net_device *dev;
1357        struct net_device *last;
1358        struct net *net;
1359        int err;
1360
1361        rtnl_lock();
1362        err = raw_notifier_chain_register(&netdev_chain, nb);
1363        if (err)
1364                goto unlock;
1365        if (dev_boot_phase)
1366                goto unlock;
1367        for_each_net(net) {
1368                for_each_netdev(net, dev) {
1369                        err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1370                        err = notifier_to_errno(err);
1371                        if (err)
1372                                goto rollback;
1373
1374                        if (!(dev->flags & IFF_UP))
1375                                continue;
1376
1377                        nb->notifier_call(nb, NETDEV_UP, dev);
1378                }
1379        }
1380
1381unlock:
1382        rtnl_unlock();
1383        return err;
1384
1385rollback:
1386        last = dev;
1387        for_each_net(net) {
1388                for_each_netdev(net, dev) {
1389                        if (dev == last)
1390                                goto outroll;
1391
1392                        if (dev->flags & IFF_UP) {
1393                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1394                                nb->notifier_call(nb, NETDEV_DOWN, dev);
1395                        }
1396                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1397                        nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1398                }
1399        }
1400
1401outroll:
1402        raw_notifier_chain_unregister(&netdev_chain, nb);
1403        goto unlock;
1404}
1405EXPORT_SYMBOL(register_netdevice_notifier);
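
/*
 * Illustrative sketch, not part of the original file: a minimal notifier
 * block.  In this kernel the notifier data pointer is the net_device itself.
 * The names below are hypothetical.
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                pr_info("%s is now up\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier __maybe_unused = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier) from module init,
 * unregister_netdevice_notifier(&example_netdev_notifier) from module exit.
 */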
1406
1407/**
1408 *      unregister_netdevice_notifier - unregister a network notifier block
1409 *      @nb: notifier
1410 *
1411 *      Unregister a notifier previously registered by
 1412 *      register_netdevice_notifier(). The notifier is unlinked from the
1413 *      kernel structures and may then be reused. A negative errno code
1414 *      is returned on a failure.
1415 *
 1416 *      After unregistering, unregister and down device events are synthesized
 1417 *      for all devices on the device list and delivered to the removed
 1418 *      notifier, removing the need for special case cleanup code.
1419 */
1420
1421int unregister_netdevice_notifier(struct notifier_block *nb)
1422{
1423        struct net_device *dev;
1424        struct net *net;
1425        int err;
1426
1427        rtnl_lock();
1428        err = raw_notifier_chain_unregister(&netdev_chain, nb);
1429        if (err)
1430                goto unlock;
1431
1432        for_each_net(net) {
1433                for_each_netdev(net, dev) {
1434                        if (dev->flags & IFF_UP) {
1435                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1436                                nb->notifier_call(nb, NETDEV_DOWN, dev);
1437                        }
1438                        nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1439                        nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1440                }
1441        }
1442unlock:
1443        rtnl_unlock();
1444        return err;
1445}
1446EXPORT_SYMBOL(unregister_netdevice_notifier);
1447
1448/**
1449 *      call_netdevice_notifiers - call all network notifier blocks
1450 *      @val: value passed unmodified to notifier function
1451 *      @dev: net_device pointer passed unmodified to notifier function
1452 *
1453 *      Call all network notifier blocks.  Parameters and return value
1454 *      are as for raw_notifier_call_chain().
1455 */
1456
1457int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1458{
1459        ASSERT_RTNL();
1460        return raw_notifier_call_chain(&netdev_chain, val, dev);
1461}
1462EXPORT_SYMBOL(call_netdevice_notifiers);
1463
1464static struct static_key netstamp_needed __read_mostly;
1465#ifdef HAVE_JUMP_LABEL
1466/* We are not allowed to call static_key_slow_dec() from irq context
1467 * If net_disable_timestamp() is called from irq context, defer the
1468 * static_key_slow_dec() calls.
1469 */
1470static atomic_t netstamp_needed_deferred;
1471#endif
1472
1473void net_enable_timestamp(void)
1474{
1475#ifdef HAVE_JUMP_LABEL
1476        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1477
1478        if (deferred) {
1479                while (--deferred)
1480                        static_key_slow_dec(&netstamp_needed);
1481                return;
1482        }
1483#endif
1484        WARN_ON(in_interrupt());
1485        static_key_slow_inc(&netstamp_needed);
1486}
1487EXPORT_SYMBOL(net_enable_timestamp);
1488
1489void net_disable_timestamp(void)
1490{
1491#ifdef HAVE_JUMP_LABEL
1492        if (in_interrupt()) {
1493                atomic_inc(&netstamp_needed_deferred);
1494                return;
1495        }
1496#endif
1497        static_key_slow_dec(&netstamp_needed);
1498}
1499EXPORT_SYMBOL(net_disable_timestamp);
1500
1501static inline void net_timestamp_set(struct sk_buff *skb)
1502{
1503        skb->tstamp.tv64 = 0;
1504        if (static_key_false(&netstamp_needed))
1505                __net_timestamp(skb);
1506}
1507
1508#define net_timestamp_check(COND, SKB)                  \
1509        if (static_key_false(&netstamp_needed)) {               \
1510                if ((COND) && !(SKB)->tstamp.tv64)      \
1511                        __net_timestamp(SKB);           \
1512        }                                               \
1513
1514static int net_hwtstamp_validate(struct ifreq *ifr)
1515{
1516        struct hwtstamp_config cfg;
1517        enum hwtstamp_tx_types tx_type;
1518        enum hwtstamp_rx_filters rx_filter;
1519        int tx_type_valid = 0;
1520        int rx_filter_valid = 0;
1521
1522        if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1523                return -EFAULT;
1524
1525        if (cfg.flags) /* reserved for future extensions */
1526                return -EINVAL;
1527
1528        tx_type = cfg.tx_type;
1529        rx_filter = cfg.rx_filter;
1530
1531        switch (tx_type) {
1532        case HWTSTAMP_TX_OFF:
1533        case HWTSTAMP_TX_ON:
1534        case HWTSTAMP_TX_ONESTEP_SYNC:
1535                tx_type_valid = 1;
1536                break;
1537        }
1538
1539        switch (rx_filter) {
1540        case HWTSTAMP_FILTER_NONE:
1541        case HWTSTAMP_FILTER_ALL:
1542        case HWTSTAMP_FILTER_SOME:
1543        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1544        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1545        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1546        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1547        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1548        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1549        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1550        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1551        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1552        case HWTSTAMP_FILTER_PTP_V2_EVENT:
1553        case HWTSTAMP_FILTER_PTP_V2_SYNC:
1554        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1555                rx_filter_valid = 1;
1556                break;
1557        }
1558
1559        if (!tx_type_valid || !rx_filter_valid)
1560                return -ERANGE;
1561
1562        return 0;
1563}
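
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * this is roughly how userspace hands a hwtstamp_config to the validation
 * above via the SIOCSHWTSTAMP ioctl.  The interface name "eth0" and the
 * chosen filter are assumptions for illustration.
 */
#if 0	/* userspace code, shown here for context only */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.flags = 0;				/* reserved, must be zero */
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif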
1564
1565static inline bool is_skb_forwardable(struct net_device *dev,
1566                                      struct sk_buff *skb)
1567{
1568        unsigned int len;
1569
1570        if (!(dev->flags & IFF_UP))
1571                return false;
1572
1573        len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1574        if (skb->len <= len)
1575                return true;
1576
1577        /* if TSO is enabled, we don't care about the length, as the packet
1578         * could be forwarded without being segmented first
1579         */
1580        if (skb_is_gso(skb))
1581                return true;
1582
1583        return false;
1584}
1585
1586/**
1587 * dev_forward_skb - loopback an skb to another netif
1588 *
1589 * @dev: destination network device
1590 * @skb: buffer to forward
1591 *
1592 * return values:
1593 *      NET_RX_SUCCESS  (no congestion)
1594 *      NET_RX_DROP     (packet was dropped, but freed)
1595 *
1596 * dev_forward_skb can be used for injecting an skb from the
1597 * start_xmit function of one device into the receive queue
1598 * of another device.
1599 *
1600 * The receiving device may be in another namespace, so
1601 * we have to clear all information in the skb that could
1602 * impact namespace isolation.
1603 */
1604int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1605{
1606        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1607                if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1608                        atomic_long_inc(&dev->rx_dropped);
1609                        kfree_skb(skb);
1610                        return NET_RX_DROP;
1611                }
1612        }
1613
1614        skb_orphan(skb);
1615        nf_reset(skb);
1616
1617        if (unlikely(!is_skb_forwardable(dev, skb))) {
1618                atomic_long_inc(&dev->rx_dropped);
1619                kfree_skb(skb);
1620                return NET_RX_DROP;
1621        }
1622        skb->skb_iif = 0;
1623        skb->dev = dev;
1624        skb_dst_drop(skb);
1625        skb->tstamp.tv64 = 0;
1626        skb->pkt_type = PACKET_HOST;
1627        skb->protocol = eth_type_trans(skb, dev);
1628        skb->mark = 0;
1629        secpath_reset(skb);
1630        nf_reset(skb);
1631        return netif_rx(skb);
1632}
1633EXPORT_SYMBOL_GPL(dev_forward_skb);
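
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the classic user of dev_forward_skb() is a paired virtual device whose
 * ndo_start_xmit hands the frame straight to its peer's receive path, much
 * like veth does.  struct my_priv and its peer field are assumptions.
 */
struct my_priv {			/* assumption: driver-private data */
	struct net_device *peer;
};

static netdev_tx_t my_pair_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	struct net_device *peer = priv->peer;

	/* dev_forward_skb() scrubs the skb and injects it via netif_rx() */
	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;	/* peer dropped and freed it */

	return NETDEV_TX_OK;
}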
1634
1635static inline int deliver_skb(struct sk_buff *skb,
1636                              struct packet_type *pt_prev,
1637                              struct net_device *orig_dev)
1638{
1639        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1640                return -ENOMEM;
1641        atomic_inc(&skb->users);
1642        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1643}
1644
1645static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1646{
1647        if (ptype->af_packet_priv == NULL)
1648                return false;
1649
1650        if (ptype->id_match)
1651                return ptype->id_match(ptype, skb->sk);
1652        else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1653                return true;
1654
1655        return false;
1656}
1657
1658/*
1659 *      Support routine. Sends outgoing frames to any network
1660 *      taps currently in use.
1661 */
1662
1663static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1664{
1665        struct packet_type *ptype;
1666        struct sk_buff *skb2 = NULL;
1667        struct packet_type *pt_prev = NULL;
1668
1669        rcu_read_lock();
1670        list_for_each_entry_rcu(ptype, &ptype_all, list) {
1671                /* Never send packets back to the socket
1672                 * they originated from - MvS (miquels@drinkel.ow.org)
1673                 */
1674                if ((ptype->dev == dev || !ptype->dev) &&
1675                    (!skb_loop_sk(ptype, skb))) {
1676                        if (pt_prev) {
1677                                deliver_skb(skb2, pt_prev, skb->dev);
1678                                pt_prev = ptype;
1679                                continue;
1680                        }
1681
1682                        skb2 = skb_clone(skb, GFP_ATOMIC);
1683                        if (!skb2)
1684                                break;
1685
1686                        net_timestamp_set(skb2);
1687
1688                        /* The network header should be set correctly
1689                         * by the sender, so the check below is just
1690                         * protection against buggy protocols.
1691                         */
1692                        skb_reset_mac_header(skb2);
1693
1694                        if (skb_network_header(skb2) < skb2->data ||
1695                            skb2->network_header > skb2->tail) {
1696                                net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1697                                                     ntohs(skb2->protocol),
1698                                                     dev->name);
1699                                skb_reset_network_header(skb2);
1700                        }
1701
1702                        skb2->transport_header = skb2->network_header;
1703                        skb2->pkt_type = PACKET_OUTGOING;
1704                        pt_prev = ptype;
1705                }
1706        }
1707        if (pt_prev)
1708                pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1709        rcu_read_unlock();
1710}
1711
1712/**
1713 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1714 * @dev: Network device
1715 * @txq: number of queues available
1716 *
1717 * If real_num_tx_queues is changed the tc mappings may no longer be
1718 * valid. To resolve this verify the tc mapping remains valid and if
1719 * not NULL the mapping. With no priorities mapping to this
1720 * offset/count pair it will no longer be used. In the worst case TC0
1721 * is invalid nothing can be done so disable priority mappings. If is
1722 * expected that drivers will fix this mapping if they can before
1723 * calling netif_set_real_num_tx_queues.
1724 */
1725static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1726{
1727        int i;
1728        struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1729
1730        /* If TC0 is invalidated disable TC mapping */
1731        if (tc->offset + tc->count > txq) {
1732                pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1733                dev->num_tc = 0;
1734                return;
1735        }
1736
1737        /* Invalidated prio to tc mappings set to TC0 */
1738        for (i = 1; i < TC_BITMASK + 1; i++) {
1739                int q = netdev_get_prio_tc_map(dev, i);
1740
1741                tc = &dev->tc_to_txq[q];
1742                if (tc->offset + tc->count > txq) {
1743                        pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1744                                i, q);
1745                        netdev_set_prio_tc_map(dev, i, 0);
1746                }
1747        }
1748}
1749
1750/*
1751 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1752 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1753 */
1754int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1755{
1756        int rc;
1757
1758        if (txq < 1 || txq > dev->num_tx_queues)
1759                return -EINVAL;
1760
1761        if (dev->reg_state == NETREG_REGISTERED ||
1762            dev->reg_state == NETREG_UNREGISTERING) {
1763                ASSERT_RTNL();
1764
1765                rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1766                                                  txq);
1767                if (rc)
1768                        return rc;
1769
1770                if (dev->num_tc)
1771                        netif_setup_tc(dev, txq);
1772
1773                if (txq < dev->real_num_tx_queues)
1774                        qdisc_reset_all_tx_gt(dev, txq);
1775        }
1776
1777        dev->real_num_tx_queues = txq;
1778        return 0;
1779}
1780EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1781
1782#ifdef CONFIG_RPS
1783/**
1784 *      netif_set_real_num_rx_queues - set actual number of RX queues used
1785 *      @dev: Network device
1786 *      @rxq: Actual number of RX queues
1787 *
1788 *      This must be called either with the rtnl_lock held or before
1789 *      registration of the net device.  Returns 0 on success, or a
1790 *      negative error code.  If called before registration, it always
1791 *      succeeds.
1792 */
1793int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1794{
1795        int rc;
1796
1797        if (rxq < 1 || rxq > dev->num_rx_queues)
1798                return -EINVAL;
1799
1800        if (dev->reg_state == NETREG_REGISTERED) {
1801                ASSERT_RTNL();
1802
1803                rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1804                                                  rxq);
1805                if (rc)
1806                        return rc;
1807        }
1808
1809        dev->real_num_rx_queues = rxq;
1810        return 0;
1811}
1812EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1813#endif
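
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical "set channels" path shrinking or growing the number of
 * queues in use at runtime.  Ethtool-style callbacks already run under
 * rtnl_lock(), which both setters above require once the device is
 * registered.  my_set_channels is an assumption, not a real callback here.
 */
static int my_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	ASSERT_RTNL();

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	/* a no-op stub is used instead when CONFIG_RPS is not set */
	err = netif_set_real_num_rx_queues(dev, count);
	if (err)
		return err;

	/* ... reprogram the hardware rings/interrupts for @count here ... */
	return 0;
}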
1814
1815/**
1816 * netif_get_num_default_rss_queues - default number of RSS queues
1817 *
1818 * This routine should set an upper limit on the number of RSS queues
1819 * used by default by multiqueue devices.
1820 */
1821int netif_get_num_default_rss_queues(void)
1822{
1823        return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1824}
1825EXPORT_SYMBOL(netif_get_num_default_rss_queues);
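
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * at probe time a hypothetical multiqueue driver caps its queue count at
 * the RSS default computed above.  MY_HW_MAX_QUEUES is an assumption for
 * illustration.
 */
#define MY_HW_MAX_QUEUES	16	/* assumption: hardware limit */

static struct net_device *my_alloc_netdev(void)
{
	int nqueues;

	nqueues = min_t(int, MY_HW_MAX_QUEUES,
			netif_get_num_default_rss_queues());

	/* one TX and one RX queue per channel, no private data in this sketch */
	return alloc_etherdev_mqs(0, nqueues, nqueues);
}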
1826
1827static inline void __netif_reschedule(struct Qdisc *q)
1828{
1829        struct softnet_data *sd;
1830        unsigned long flags;
1831
1832        local_irq_save(flags);
1833        sd = &__get_cpu_var(softnet_data);
1834        q->next_sched = NULL;
1835        *sd->output_queue_tailp = q;
1836        sd->output_queue_tailp = &q->next_sched;
1837        raise_softirq_irqoff(NET_TX_SOFTIRQ);
1838        local_irq_restore(flags);
1839}
1840
1841void __netif_schedule(struct Qdisc *q)
1842{
1843        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1844                __netif_reschedule(q);
1845}
1846EXPORT_SYMBOL(__netif_schedule);
1847
1848void dev_kfree_skb_irq(struct sk_buff *skb)
1849{
1850        if (atomic_dec_and_test(&skb->users)) {
1851                struct softnet_data *sd;
1852                unsigned long flags;
1853
1854                local_irq_save(flags);
1855                sd = &__get_cpu_var(softnet_data);
1856                skb->next = sd->completion_queue;
1857                sd->completion_queue = skb;
1858                raise_softirq_irqoff(NET_TX_SOFTIRQ);
1859                local_irq_restore(flags);
1860        }
1861}
1862EXPORT_SYMBOL(dev_kfree_skb_irq);
1863
1864void dev_kfree_skb_any(struct sk_buff *skb)
1865{
1866        if (in_irq() || irqs_disabled())
1867                dev_kfree_skb_irq(skb);
1868        else
1869                dev_kfree_skb(skb);
1870}
1871EXPORT_SYMBOL(dev_kfree_skb_any);
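
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical TX-completion handler that may run in hard irq context
 * and therefore frees the transmitted skbs with dev_kfree_skb_any(), which
 * falls back to dev_kfree_skb_irq() when irqs are off.  The ring layout is
 * an assumption.
 */
struct my_tx_ring {			/* assumption: driver-private ring */
	struct sk_buff	*queued[64];
	unsigned int	tail;
};

static void my_tx_complete(struct my_tx_ring *ring, unsigned int done)
{
	while (done--) {
		struct sk_buff *skb = ring->queued[ring->tail];

		ring->queued[ring->tail] = NULL;
		ring->tail = (ring->tail + 1) % 64;

		/* safe from hard irq, irq-off and process context alike */
		dev_kfree_skb_any(skb);
	}
}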
1872
1873
1874/**
1875 * netif_device_detach - mark device as removed
1876 * @dev: network device
1877 *
1878 * Mark device as removed from the system and therefore no longer available.
1879 */
1880void netif_device_detach(struct net_device *dev)
1881{
1882        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1883            netif_running(dev)) {
1884                netif_tx_stop_all_queues(dev);
1885        }
1886}
1887EXPORT_SYMBOL(netif_device_detach);
1888
1889/**
1890 * netif_device_attach - mark device as attached
1891 * @dev: network device
1892 *
1893 * Mark device as attached to the system and restart it if needed.
1894 */
1895void netif_device_attach(struct net_device *dev)
1896{
1897        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1898            netif_running(dev)) {
1899                netif_tx_wake_all_queues(dev);
1900                __netdev_watchdog_up(dev);
1901        }
1902}
1903EXPORT_SYMBOL(netif_device_attach);
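
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the usual callers of the two helpers above are driver suspend/resume
 * paths.  A hypothetical PCI driver (my_* names are assumptions) detaches
 * the netdev before powering the hardware down and re-attaches it once the
 * hardware is usable again.
 */
static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... quiesce DMA, save state, power the device down ... */
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power up and reprogram the hardware ... */
	netif_device_attach(dev);	/* wakes the queues and the watchdog */
	return 0;
}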
1904
1905static void skb_warn_bad_offload(const struct sk_buff *skb)
1906{
1907        static const netdev_features_t null_features = 0;
1908        struct net_device *dev = skb->dev;
1909        const char *driver = "";
1910
1911        if (dev && dev->dev.parent)
1912                driver = dev_driver_string(dev->dev.parent);
1913
1914        WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1915             "gso_type=%d ip_summed=%d\n",
1916             driver, dev ? &dev->features : &null_features,
1917             skb->sk ? &skb->sk->sk_route_caps : &null_features,
1918             skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1919             skb_shinfo(skb)->gso_type, skb->ip_summed);
1920}
1921
1922/*
1923 * Invalidate hardware checksum when packet is to be mangled, and
1924 * complete checksum manually on outgoing path.
1925 */
1926int skb_checksum_help(struct sk_buff *skb)
1927{
1928        __wsum csum;
1929        int ret = 0, offset;
1930
1931        if (skb->ip_summed == CHECKSUM_COMPLETE)
1932                goto out_set_summed;
1933
1934        if (unlikely(skb_shinfo(skb)->gso_size)) {
1935                skb_warn_bad_offload(skb);
1936                return -EINVAL;
1937        }
1938
1939        offset = skb_checksum_start_offset(skb);
1940        BUG_ON(offset >= skb_headlen(skb));
1941        csum = skb_checksum(skb, offset, skb->len - offset, 0);
1942
1943        offset += skb->csum_offset;
1944        BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1945
1946        if (skb_cloned(skb) &&
1947            !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1948                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1949                if (ret)
1950                        goto out;
1951        }
1952
1953        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1954out_set_summed:
1955        skb->ip_summed = CHECKSUM_NONE;
1956out:
1957        return ret;
1958}
1959EXPORT_SYMBOL(skb_checksum_help);
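
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical xmit path that lets hardware checksum what it can and
 * falls back to skb_checksum_help() otherwise, mirroring what
 * dev_hard_start_xmit() does further down.  The feature test is a crude
 * simplification; see netif_skb_features() below.
 */
static int my_tx_checksum(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;			/* nothing left to do */

	if (dev->features & NETIF_F_ALL_CSUM)
		return 0;			/* hardware will fill it in */

	/* resolve the checksum in software before handing the skb over */
	return skb_checksum_help(skb);
}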
1960
1961/**
1962 *      skb_gso_segment - Perform segmentation on skb.
1963 *      @skb: buffer to segment
1964 *      @features: features for the output path (see dev->features)
1965 *
1966 *      This function segments the given skb and returns a list of segments.
1967 *
1968 *      It may return NULL if the skb requires no segmentation.  This is
1969 *      only possible when GSO is used for verifying header integrity.
1970 */
1971struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1972        netdev_features_t features)
1973{
1974        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1975        struct packet_type *ptype;
1976        __be16 type = skb->protocol;
1977        int vlan_depth = ETH_HLEN;
1978        int err;
1979
1980        while (type == htons(ETH_P_8021Q)) {
1981                struct vlan_hdr *vh;
1982
1983                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1984                        return ERR_PTR(-EINVAL);
1985
1986                vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1987                type = vh->h_vlan_encapsulated_proto;
1988                vlan_depth += VLAN_HLEN;
1989        }
1990
1991        skb_reset_mac_header(skb);
1992        skb->mac_len = skb->network_header - skb->mac_header;
1993        __skb_pull(skb, skb->mac_len);
1994
1995        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1996                skb_warn_bad_offload(skb);
1997
1998                if (skb_header_cloned(skb) &&
1999                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2000                        return ERR_PTR(err);
2001        }
2002
2003        rcu_read_lock();
2004        list_for_each_entry_rcu(ptype,
2005                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2006                if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2007                        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2008                                err = ptype->gso_send_check(skb);
2009                                segs = ERR_PTR(err);
2010                                if (err || skb_gso_ok(skb, features))
2011                                        break;
2012                                __skb_push(skb, (skb->data -
2013                                                 skb_network_header(skb)));
2014                        }
2015                        segs = ptype->gso_segment(skb, features);
2016                        break;
2017                }
2018        }
2019        rcu_read_unlock();
2020
2021        __skb_push(skb, skb->data - skb_mac_header(skb));
2022
2023        return segs;
2024}
2025EXPORT_SYMBOL(skb_gso_segment);
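
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * one common pattern outside this file is to segment a GSO skb in software
 * and transmit the resulting list one frame at a time; the original skb is
 * consumed once the segments exist, since the segments hold their own
 * references to the data.  The xmit_one callback stands in for a
 * caller-specific output function and is an assumption.
 */
static int my_gso_transmit(struct sk_buff *skb, struct net_device *dev,
			   int (*xmit_one)(struct sk_buff *, struct net_device *))
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, netif_skb_features(skb));
	if (IS_ERR(segs)) {
		kfree_skb(skb);
		return PTR_ERR(segs);
	}
	if (!segs)			/* header check only, no split needed */
		return xmit_one(skb, dev);

	consume_skb(skb);

	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		xmit_one(segs, dev);
		segs = next;
	}
	return 0;
}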
2026
2027/* Take action when hardware reception checksum errors are detected. */
2028#ifdef CONFIG_BUG
2029void netdev_rx_csum_fault(struct net_device *dev)
2030{
2031        if (net_ratelimit()) {
2032                pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2033                dump_stack();
2034        }
2035}
2036EXPORT_SYMBOL(netdev_rx_csum_fault);
2037#endif
2038
2039/* Actually, we should eliminate this check as soon as we know that:
2040 * 1. An IOMMU is present and allows mapping all of memory.
2041 * 2. No high memory really exists on this machine.
2042 */
2043
2044static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2045{
2046#ifdef CONFIG_HIGHMEM
2047        int i;
2048        if (!(dev->features & NETIF_F_HIGHDMA)) {
2049                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2050                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2051                        if (PageHighMem(skb_frag_page(frag)))
2052                                return 1;
2053                }
2054        }
2055
2056        if (PCI_DMA_BUS_IS_PHYS) {
2057                struct device *pdev = dev->dev.parent;
2058
2059                if (!pdev)
2060                        return 0;
2061                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2062                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2063                        dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2064                        if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2065                                return 1;
2066                }
2067        }
2068#endif
2069        return 0;
2070}
2071
2072struct dev_gso_cb {
2073        void (*destructor)(struct sk_buff *skb);
2074};
2075
2076#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2077
2078static void dev_gso_skb_destructor(struct sk_buff *skb)
2079{
2080        struct dev_gso_cb *cb;
2081
2082        do {
2083                struct sk_buff *nskb = skb->next;
2084
2085                skb->next = nskb->next;
2086                nskb->next = NULL;
2087                kfree_skb(nskb);
2088        } while (skb->next);
2089
2090        cb = DEV_GSO_CB(skb);
2091        if (cb->destructor)
2092                cb->destructor(skb);
2093}
2094
2095/**
2096 *      dev_gso_segment - Perform emulated hardware segmentation on skb.
2097 *      @skb: buffer to segment
2098 *      @features: device features as applicable to this skb
2099 *
2100 *      This function segments the given skb and stores the list of segments
2101 *      in skb->next.
2102 */
2103static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2104{
2105        struct sk_buff *segs;
2106
2107        segs = skb_gso_segment(skb, features);
2108
2109        /* Verifying header integrity only. */
2110        if (!segs)
2111                return 0;
2112
2113        if (IS_ERR(segs))
2114                return PTR_ERR(segs);
2115
2116        skb->next = segs;
2117        DEV_GSO_CB(skb)->destructor = skb->destructor;
2118        skb->destructor = dev_gso_skb_destructor;
2119
2120        return 0;
2121}
2122
2123static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2124{
2125        return ((features & NETIF_F_GEN_CSUM) ||
2126                ((features & NETIF_F_V4_CSUM) &&
2127                 protocol == htons(ETH_P_IP)) ||
2128                ((features & NETIF_F_V6_CSUM) &&
2129                 protocol == htons(ETH_P_IPV6)) ||
2130                ((features & NETIF_F_FCOE_CRC) &&
2131                 protocol == htons(ETH_P_FCOE)));
2132}
2133
2134static netdev_features_t harmonize_features(struct sk_buff *skb,
2135        __be16 protocol, netdev_features_t features)
2136{
2137        if (skb->ip_summed != CHECKSUM_NONE &&
2138            !can_checksum_protocol(features, protocol)) {
2139                features &= ~NETIF_F_ALL_CSUM;
2140                features &= ~NETIF_F_SG;
2141        } else if (illegal_highdma(skb->dev, skb)) {
2142                features &= ~NETIF_F_SG;
2143        }
2144
2145        return features;
2146}
2147
2148netdev_features_t netif_skb_features(struct sk_buff *skb)
2149{
2150        __be16 protocol = skb->protocol;
2151        netdev_features_t features = skb->dev->features;
2152
2153        if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2154                features &= ~NETIF_F_GSO_MASK;
2155
2156        if (protocol == htons(ETH_P_8021Q)) {
2157                struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2158                protocol = veh->h_vlan_encapsulated_proto;
2159        } else if (!vlan_tx_tag_present(skb)) {
2160                return harmonize_features(skb, protocol, features);
2161        }
2162
2163        features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2164
2165        if (protocol != htons(ETH_P_8021Q)) {
2166                return harmonize_features(skb, protocol, features);
2167        } else {
2168                features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2169                                NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2170                return harmonize_features(skb, protocol, features);
2171        }
2172}
2173EXPORT_SYMBOL(netif_skb_features);
2174
2175/*
2176 * Returns true if either:
2177 *      1. skb has frag_list and the device doesn't support FRAGLIST, or
2178 *      2. skb is fragmented and the device does not support SG, or if
2179 *         at least one of the fragments is in highmem and the device
2180 *         does not support DMA from it.
2181 */
2182static inline int skb_needs_linearize(struct sk_buff *skb,
2183                                      int features)
2184{
2185        return skb_is_nonlinear(skb) &&
2186                        ((skb_has_frag_list(skb) &&
2187                                !(features & NETIF_F_FRAGLIST)) ||
2188                        (skb_shinfo(skb)->nr_frags &&
2189                                !(features & NETIF_F_SG)));
2190}
2191
2192int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2193                        struct netdev_queue *txq)
2194{
2195        const struct net_device_ops *ops = dev->netdev_ops;
2196        int rc = NETDEV_TX_OK;
2197        unsigned int skb_len;
2198
2199        if (likely(!skb->next)) {
2200                netdev_features_t features;
2201
2202                /*
2203                 * If the device doesn't need skb->dst, release it right now
2204                 * while it's hot in this CPU's cache
2205                 */
2206                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2207                        skb_dst_drop(skb);
2208
2209                if (!list_empty(&ptype_all))
2210                        dev_queue_xmit_nit(skb, dev);
2211
2212                features = netif_skb_features(skb);
2213
2214                if (vlan_tx_tag_present(skb) &&
2215                    !(features & NETIF_F_HW_VLAN_TX)) {
2216                        skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2217                        if (unlikely(!skb))
2218                                goto out;
2219
2220                        skb->vlan_tci = 0;
2221                }
2222
2223                if (netif_needs_gso(skb, features)) {
2224                        if (unlikely(dev_gso_segment(skb, features)))
2225                                goto out_kfree_skb;
2226                        if (skb->next)
2227                                goto gso;
2228                } else {
2229                        if (skb_needs_linearize(skb, features) &&
2230                            __skb_linearize(skb))
2231                                goto out_kfree_skb;
2232
2233                        /* If packet is not checksummed and device does not
2234                         * support checksumming for this protocol, complete
2235                         * checksumming here.
2236                         */
2237                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2238                                skb_set_transport_header(skb,
2239                                        skb_checksum_start_offset(skb));
2240                                if (!(features & NETIF_F_ALL_CSUM) &&
2241                                     skb_checksum_help(skb))
2242                                        goto out_kfree_skb;
2243                        }
2244                }
2245
2246                skb_len = skb->len;
2247                rc = ops->ndo_start_xmit(skb, dev);
2248                trace_net_dev_xmit(skb, rc, dev, skb_len);
2249                if (rc == NETDEV_TX_OK)
2250                        txq_trans_update(txq);
2251                return rc;
2252        }
2253
2254gso:
2255        do {
2256                struct sk_buff *nskb = skb->next;
2257
2258                skb->next = nskb->next;
2259                nskb->next = NULL;
2260
2261                /*
2262                 * If the device doesn't need nskb->dst, release it right now
2263                 * while it's hot in this CPU's cache
2264                 */
2265                if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2266                        skb_dst_drop(nskb);
2267
2268                skb_len = nskb->len;
2269                rc = ops->ndo_start_xmit(nskb, dev);
2270                trace_net_dev_xmit(nskb, rc, dev, skb_len);
2271                if (unlikely(rc != NETDEV_TX_OK)) {
2272                        if (rc & ~NETDEV_TX_MASK)
2273                                goto out_kfree_gso_skb;
2274                        nskb->next = skb->next;
2275                        skb->next = nskb;
2276                        return rc;
2277                }
2278                txq_trans_update(txq);
2279                if (unlikely(netif_xmit_stopped(txq) && skb->next))
2280                        return NETDEV_TX_BUSY;
2281        } while (skb->next);
2282
2283out_kfree_gso_skb:
2284        if (likely(skb->next == NULL))
2285                skb->destructor = DEV_GSO_CB(skb)->destructor;
2286out_kfree_skb:
2287        kfree_skb(skb);
2288out:
2289        return rc;
2290}
2291
2292static u32 hashrnd __read_mostly;
2293
2294/*
2295 * Returns a Tx hash based on the given packet descriptor and the number
2296 * of Tx queues to be used as a distribution range.
2297 */
2298u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2299                  unsigned int num_tx_queues)
2300{
2301        u32 hash;
2302        u16 qoffset = 0;
2303        u16 qcount = num_tx_queues;
2304
2305        if (skb_rx_queue_recorded(skb)) {
2306                hash = skb_get_rx_queue(skb);
2307                while (unlikely(hash >= num_tx_queues))
2308                        hash -= num_tx_queues;
2309                return hash;
2310        }
2311
2312        if (dev->num_tc) {
2313                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2314                qoffset = dev->tc_to_txq[tc].offset;
2315                qcount = dev->tc_to_txq[tc].count;
2316        }
2317
2318        if (skb->sk && skb->sk->sk_hash)
2319                hash = skb->sk->sk_hash;
2320        else
2321                hash = (__force u16) skb->protocol;
2322        hash = jhash_1word(hash, hashrnd);
2323
2324        return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2325}
2326EXPORT_SYMBOL(__skb_tx_hash);
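
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical ndo_select_queue implementation that pins one traffic
 * class to queue 0 and spreads everything else with the hash above.  The
 * priority test is an assumption for illustration.
 */
static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* assumption: keep control traffic on a dedicated queue */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}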
2327
2328static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2329{
2330        if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2331                net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2332                                     dev->name, queue_index,
2333                                     dev->real_num_tx_queues);
2334                return 0;
2335        }
2336        return queue_index;
2337}
2338
2339static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2340{
2341#ifdef CONFIG_XPS
2342        struct xps_dev_maps *dev_maps;
2343        struct xps_map *map;
2344        int queue_index = -1;
2345
2346        rcu_read_lock();
2347        dev_maps = rcu_dereference(dev->xps_maps);
2348        if (dev_maps) {
2349                map = rcu_dereference(
2350                    dev_maps->cpu_map[raw_smp_processor_id()]);
2351                if (map) {
2352                        if (map->len == 1)
2353                                queue_index = map->queues[0];
2354                        else {
2355                                u32 hash;
2356                                if (skb->sk && skb->sk->sk_hash)
2357                                        hash = skb->sk->sk_hash;
2358                                else
2359                                        hash = (__force u16) skb->protocol ^
2360                                            skb->rxhash;
2361                                hash = jhash_1word(hash, hashrnd);
2362                                queue_index = map->queues[
2363                                    ((u64)hash * map->len) >> 32];
2364                        }
2365                        if (unlikely(queue_index >= dev->real_num_tx_queues))
2366                                queue_index = -1;
2367                }
2368        }
2369        rcu_read_unlock();
2370
2371        return queue_index;
2372#else
2373        return -1;
2374#endif
2375}
2376
2377static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2378                                        struct sk_buff *skb)
2379{
2380        int queue_index;
2381        const struct net_device_ops *ops = dev->netdev_ops;
2382
2383        if (dev->real_num_tx_queues == 1)
2384                queue_index = 0;
2385        else if (ops->ndo_select_queue) {
2386                queue_index = ops->ndo_select_queue(dev, skb);
2387                queue_index = dev_cap_txqueue(dev, queue_index);
2388        } else {
2389                struct sock *sk = skb->sk;
2390                queue_index = sk_tx_queue_get(sk);
2391
2392                if (queue_index < 0 || skb->ooo_okay ||
2393                    queue_index >= dev->real_num_tx_queues) {
2394                        int old_index = queue_index;
2395
2396                        queue_index = get_xps_queue(dev, skb);
2397                        if (queue_index < 0)
2398                                queue_index = skb_tx_hash(dev, skb);
2399
2400                        if (queue_index != old_index && sk) {
2401                                struct dst_entry *dst =
2402                                    rcu_dereference_check(sk->sk_dst_cache, 1);
2403
2404                                if (dst && skb_dst(skb) == dst)
2405                                        sk_tx_queue_set(sk, queue_index);
2406                        }
2407                }
2408        }
2409
2410        skb_set_queue_mapping(skb, queue_index);
2411        return netdev_get_tx_queue(dev, queue_index);
2412}
2413
2414static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2415                                 struct net_device *dev,
2416                                 struct netdev_queue *txq)
2417{
2418        spinlock_t *root_lock = qdisc_lock(q);
2419        bool contended;
2420        int rc;
2421
2422        qdisc_skb_cb(skb)->pkt_len = skb->len;
2423        qdisc_calculate_pkt_len(skb, q);
2424        /*
2425         * Heuristic to force contended enqueues to serialize on a
2426         * separate lock before trying to get qdisc main lock.
2427         * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2428         * and dequeue packets faster.
2429         */
2430        contended = qdisc_is_running(q);
2431        if (unlikely(contended))
2432                spin_lock(&q->busylock);
2433
2434        spin_lock(root_lock);
2435        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2436                kfree_skb(skb);
2437                rc = NET_XMIT_DROP;
2438        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2439                   qdisc_run_begin(q)) {
2440                /*
2441                 * This is a work-conserving queue; there are no old skbs
2442                 * waiting to be sent out; and the qdisc is not running -
2443                 * xmit the skb directly.
2444                 */
2445                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2446                        skb_dst_force(skb);
2447
2448                qdisc_bstats_update(q, skb);
2449
2450                if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2451                        if (unlikely(contended)) {
2452                                spin_unlock(&q->busylock);
2453                                contended = false;
2454                        }
2455                        __qdisc_run(q);
2456                } else
2457                        qdisc_run_end(q);
2458
2459                rc = NET_XMIT_SUCCESS;
2460        } else {
2461                skb_dst_force(skb);
2462                rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2463                if (qdisc_run_begin(q)) {
2464                        if (unlikely(contended)) {
2465                                spin_unlock(&q->busylock);
2466                                contended = false;
2467                        }
2468                        __qdisc_run(q);
2469                }
2470        }
2471        spin_unlock(root_lock);
2472        if (unlikely(contended))
2473                spin_unlock(&q->busylock);
2474        return rc;
2475}
2476
2477#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2478static void skb_update_prio(struct sk_buff *skb)
2479{
2480        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2481
2482        if (!skb->priority && skb->sk && map) {
2483                unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2484
2485                if (prioidx < map->priomap_len)
2486                        skb->priority = map->priomap[prioidx];
2487        }
2488}
2489#else
2490#define skb_update_prio(skb)
2491#endif
2492
2493static DEFINE_PER_CPU(int, xmit_recursion);
2494#define RECURSION_LIMIT 10
2495
2496/**
2497 *      dev_loopback_xmit - loop back @skb
2498 *      @skb: buffer to transmit
2499 */
2500int dev_loopback_xmit(struct sk_buff *skb)
2501{
2502        skb_reset_mac_header(skb);
2503        __skb_pull(skb, skb_network_offset(skb));
2504        skb->pkt_type = PACKET_LOOPBACK;
2505        skb->ip_summed = CHECKSUM_UNNECESSARY;
2506        WARN_ON(!skb_dst(skb));
2507        skb_dst_force(skb);
2508        netif_rx_ni(skb);
2509        return 0;
2510}
2511EXPORT_SYMBOL(dev_loopback_xmit);
2512
2513/**
2514 *      dev_queue_xmit - transmit a buffer
2515 *      @skb: buffer to transmit
2516 *
2517 *      Queue a buffer for transmission to a network device. The caller must
2518 *      have set the device and priority and built the buffer before calling
2519 *      this function. The function can be called from an interrupt.
2520 *
2521 *      A negative errno code is returned on a failure. A success does not
2522 *      guarantee the frame will be transmitted as it may be dropped due
2523 *      to congestion or traffic shaping.
2524 *
2525 * -----------------------------------------------------------------------------------
2526 *      I notice this method can also return errors from the queue disciplines,
2527 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2528 *      be positive.
2529 *
2530 *      Regardless of the return value, the skb is consumed, so it is currently
2531 *      difficult to retry a send to this method.  (You can bump the ref count
2532 *      before sending to hold a reference for retry if you are careful.)
2533 *
2534 *      When calling this method, interrupts MUST be enabled.  This is because
2535 *      the BH enable code must have IRQs enabled so that it will not deadlock.
2536 *          --BLG
2537 */
2538int dev_queue_xmit(struct sk_buff *skb)
2539{
2540        struct net_device *dev = skb->dev;
2541        struct netdev_queue *txq;
2542        struct Qdisc *q;
2543        int rc = -ENOMEM;
2544
2545        /* Disable soft irqs for various locks below. Also
2546         * stops preemption for RCU.
2547         */
2548        rcu_read_lock_bh();
2549
2550        skb_update_prio(skb);
2551
2552        txq = dev_pick_tx(dev, skb);
2553        q = rcu_dereference_bh(txq->qdisc);
2554
2555#ifdef CONFIG_NET_CLS_ACT
2556        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2557#endif
2558        trace_net_dev_queue(skb);
2559        if (q->enqueue) {
2560                rc = __dev_xmit_skb(skb, q, dev, txq);
2561                goto out;
2562        }
2563
2564        /* The device has no queue. Common case for software devices:
2565           loopback, all sorts of tunnels...
2566
2567           Really, it is unlikely that netif_tx_lock protection is necessary
2568           here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
2569           counters.)
2570           However, it is possible that they rely on the protection
2571           we take here.
2572
2573           So check that and take the lock; it is not prone to deadlocks.
2574           Or shoot the noqueue qdisc instead, which is even simpler 8)
2575         */
2576        if (dev->flags & IFF_UP) {
2577                int cpu = smp_processor_id(); /* ok because BHs are off */
2578
2579                if (txq->xmit_lock_owner != cpu) {
2580
2581                        if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2582                                goto recursion_alert;
2583
2584                        HARD_TX_LOCK(dev, txq, cpu);
2585
2586                        if (!netif_xmit_stopped(txq)) {
2587                                __this_cpu_inc(xmit_recursion);
2588                                rc = dev_hard_start_xmit(skb, dev, txq);
2589                                __this_cpu_dec(xmit_recursion);
2590                                if (dev_xmit_complete(rc)) {
2591                                        HARD_TX_UNLOCK(dev, txq);
2592                                        goto out;
2593                                }
2594                        }
2595                        HARD_TX_UNLOCK(dev, txq);
2596                        net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2597                                             dev->name);
2598                } else {
2599                        /* Recursion is detected! It is possible,
2600                         * unfortunately
2601                         */
2602recursion_alert:
2603                        net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2604                                             dev->name);
2605                }
2606        }
2607
2608        rc = -ENETDOWN;
2609        rcu_read_unlock_bh();
2610
2611        kfree_skb(skb);
2612        return rc;
2613out:
2614        rcu_read_unlock_bh();
2615        return rc;
2616}
2617EXPORT_SYMBOL(dev_queue_xmit);
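
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a minimal in-kernel sender that builds a raw frame and queues it with
 * dev_queue_xmit().  The EtherType value and the flat payload are
 * assumptions for illustration.
 */
#define MY_DEMO_ETHERTYPE	0x88b5	/* assumption: local experimental use */

static int my_send_frame(struct net_device *dev, const unsigned char *daddr,
			 const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(MY_DEMO_ETHERTYPE);

	if (dev_hard_header(skb, dev, MY_DEMO_ETHERTYPE, daddr,
			    dev->dev_addr, len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* note: positive NET_XMIT_* codes can come back from the qdisc */
	return dev_queue_xmit(skb);
}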
2618
2619
2620/*=======================================================================
2621                        Receiver routines
2622  =======================================================================*/
2623
2624int netdev_max_backlog __read_mostly = 1000;
2625int netdev_tstamp_prequeue __read_mostly = 1;
2626int netdev_budget __read_mostly = 300;
2627int weight_p __read_mostly = 64;            /* old backlog weight */
2628
2629/* Called with irq disabled */
2630static inline void ____napi_schedule(struct softnet_data *sd,
2631                                     struct napi_struct *napi)
2632{
2633        list_add_tail(&napi->poll_list, &sd->poll_list);
2634        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2635}
2636
2637/*
2638 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2639 * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
2640 * on success; zero indicates no valid hash.  Also sets l4_rxhash in skb
2641 * if hash is a canonical 4-tuple hash over transport ports.
2642 */
2643void __skb_get_rxhash(struct sk_buff *skb)
2644{
2645        struct flow_keys keys;
2646        u32 hash;
2647
2648        if (!skb_flow_dissect(skb, &keys))
2649                return;
2650
2651        if (keys.ports)
2652                skb->l4_rxhash = 1;
2653
2654        /* get a consistent hash (same value on both flow directions) */
2655        if (((__force u32)keys.dst < (__force u32)keys.src) ||
2656            (((__force u32)keys.dst == (__force u32)keys.src) &&
2657             ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2658                swap(keys.dst, keys.src);
2659                swap(keys.port16[0], keys.port16[1]);
2660        }
2661
2662        hash = jhash_3words((__force u32)keys.dst,
2663                            (__force u32)keys.src,
2664                            (__force u32)keys.ports, hashrnd);
2665        if (!hash)
2666                hash = 1;
2667
2668        skb->rxhash = hash;
2669}
2670EXPORT_SYMBOL(__skb_get_rxhash);
2671
2672#ifdef CONFIG_RPS
2673
2674/* One global table that all flow-based protocols share. */
2675struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2676EXPORT_SYMBOL(rps_sock_flow_table);
2677
2678struct static_key rps_needed __read_mostly;
2679
2680static struct rps_dev_flow *
2681set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2682            struct rps_dev_flow *rflow, u16 next_cpu)
2683{
2684        if (next_cpu != RPS_NO_CPU) {
2685#ifdef CONFIG_RFS_ACCEL
2686                struct netdev_rx_queue *rxqueue;
2687                struct rps_dev_flow_table *flow_table;
2688                struct rps_dev_flow *old_rflow;
2689                u32 flow_id;
2690                u16 rxq_index;
2691                int rc;
2692
2693                /* Should we steer this flow to a different hardware queue? */
2694                if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2695                    !(dev->features & NETIF_F_NTUPLE))
2696                        goto out;
2697                rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2698                if (rxq_index == skb_get_rx_queue(skb))
2699                        goto out;
2700
2701                rxqueue = dev->_rx + rxq_index;
2702                flow_table = rcu_dereference(rxqueue->rps_flow_table);
2703                if (!flow_table)
2704                        goto out;
2705                flow_id = skb->rxhash & flow_table->mask;
2706                rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2707                                                        rxq_index, flow_id);
2708                if (rc < 0)
2709                        goto out;
2710                old_rflow = rflow;
2711                rflow = &flow_table->flows[flow_id];
2712                rflow->filter = rc;
2713                if (old_rflow->filter == rflow->filter)
2714                        old_rflow->filter = RPS_NO_FILTER;
2715        out:
2716#endif
2717                rflow->last_qtail =
2718                        per_cpu(softnet_data, next_cpu).input_queue_head;
2719        }
2720
2721        rflow->cpu = next_cpu;
2722        return rflow;
2723}
2724
2725/*
2726 * get_rps_cpu is called from netif_receive_skb and returns the target
2727 * CPU from the RPS map of the receiving queue for a given skb.
2728 * rcu_read_lock must be held on entry.
2729 */
2730static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2731                       struct rps_dev_flow **rflowp)
2732{
2733        struct netdev_rx_queue *rxqueue;
2734        struct rps_map *map;
2735        struct rps_dev_flow_table *flow_table;
2736        struct rps_sock_flow_table *sock_flow_table;
2737        int cpu = -1;
2738        u16 tcpu;
2739
2740        if (skb_rx_queue_recorded(skb)) {
2741                u16 index = skb_get_rx_queue(skb);
2742                if (unlikely(index >= dev->real_num_rx_queues)) {
2743                        WARN_ONCE(dev->real_num_rx_queues > 1,
2744                                  "%s received packet on queue %u, but number "
2745                                  "of RX queues is %u\n",
2746                                  dev->name, index, dev->real_num_rx_queues);
2747                        goto done;
2748                }
2749                rxqueue = dev->_rx + index;
2750        } else
2751                rxqueue = dev->_rx;
2752
2753        map = rcu_dereference(rxqueue->rps_map);
2754        if (map) {
2755                if (map->len == 1 &&
2756                    !rcu_access_pointer(rxqueue->rps_flow_table)) {
2757                        tcpu = map->cpus[0];
2758                        if (cpu_online(tcpu))
2759                                cpu = tcpu;
2760                        goto done;
2761                }
2762        } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2763                goto done;
2764        }
2765
2766        skb_reset_network_header(skb);
2767        if (!skb_get_rxhash(skb))
2768                goto done;
2769
2770        flow_table = rcu_dereference(rxqueue->rps_flow_table);
2771        sock_flow_table = rcu_dereference(rps_sock_flow_table);
2772        if (flow_table && sock_flow_table) {
2773                u16 next_cpu;
2774                struct rps_dev_flow *rflow;
2775
2776                rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2777                tcpu = rflow->cpu;
2778
2779                next_cpu = sock_flow_table->ents[skb->rxhash &
2780                    sock_flow_table->mask];
2781
2782                /*
2783                 * If the desired CPU (where last recvmsg was done) is
2784                 * different from current CPU (one in the rx-queue flow
2785                 * table entry), switch if one of the following holds:
2786                 *   - Current CPU is unset (equal to RPS_NO_CPU).
2787                 *   - Current CPU is offline.
2788                 *   - The current CPU's queue tail has advanced beyond the
2789                 *     last packet that was enqueued using this table entry.
2790                 *     This guarantees that all previous packets for the flow
2791                 *     have been dequeued, thus preserving in order delivery.
2792                 */
2793                if (unlikely(tcpu != next_cpu) &&
2794                    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2795                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2796                      rflow->last_qtail)) >= 0))
2797                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2798
2799                if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2800                        *rflowp = rflow;
2801                        cpu = tcpu;
2802                        goto done;
2803                }
2804        }
2805
2806        if (map) {
2807                tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2808
2809                if (cpu_online(tcpu)) {
2810                        cpu = tcpu;
2811                        goto done;
2812                }
2813        }
2814
2815done:
2816        return cpu;
2817}
2818
2819#ifdef CONFIG_RFS_ACCEL
2820
2821/**
2822 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2823 * @dev: Device on which the filter was set
2824 * @rxq_index: RX queue index
2825 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2826 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2827 *
2828 * Drivers that implement ndo_rx_flow_steer() should periodically call
2829 * this function for each installed filter and remove the filters for
2830 * which it returns %true.
2831 */
2832bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2833                         u32 flow_id, u16 filter_id)
2834{
2835        struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2836        struct rps_dev_flow_table *flow_table;
2837        struct rps_dev_flow *rflow;
2838        bool expire = true;
2839        int cpu;
2840
2841        rcu_read_lock();
2842        flow_table = rcu_dereference(rxqueue->rps_flow_table);
2843        if (flow_table && flow_id <= flow_table->mask) {
2844                rflow = &flow_table->flows[flow_id];
2845                cpu = ACCESS_ONCE(rflow->cpu);
2846                if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2847                    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2848                           rflow->last_qtail) <
2849                     (int)(10 * flow_table->mask)))
2850                        expire = false;
2851        }
2852        rcu_read_unlock();
2853        return expire;
2854}
2855EXPORT_SYMBOL(rps_may_expire_flow);
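
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * a hypothetical driver that programmed hardware flow-steering filters via
 * ndo_rx_flow_steer() scans them periodically and tears down the ones the
 * stack no longer cares about.  The filter table layout is an assumption.
 */
struct my_rfs_filter {			/* assumption: per-filter bookkeeping */
	bool		in_use;
	u16		rxq_index;
	u32		flow_id;
};

static void my_expire_rfs_filters(struct net_device *dev,
				  struct my_rfs_filter *tbl, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, i)) {
			/* ... remove the hardware filter for slot i here ... */
			tbl[i].in_use = false;
		}
	}
}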
2856
2857#endif /* CONFIG_RFS_ACCEL */
2858
2859/* Called from hardirq (IPI) context */
2860static void rps_trigger_softirq(void *data)
2861{
2862        struct softnet_data *sd = data;
2863
2864        ____napi_schedule(sd, &sd->backlog);
2865        sd->received_rps++;
2866}
2867
2868#endif /* CONFIG_RPS */
2869
2870/*
2871 * Check whether this softnet_data structure belongs to another CPU.
2872 * If yes, queue it to our IPI list and return 1.
2873 * If no, return 0.
2874 */
2875static int rps_ipi_queued(struct softnet_data *sd)
2876{
2877#ifdef CONFIG_RPS
2878        struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2879
2880        if (sd != mysd) {
2881                sd->rps_ipi_next = mysd->rps_ipi_list;
2882                mysd->rps_ipi_list = sd;
2883
2884                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2885                return 1;
2886        }
2887#endif /* CONFIG_RPS */
2888        return 0;
2889}
2890
2891/*
2892 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2893 * queue (may be a remote CPU queue).
2894 */
2895static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2896                              unsigned int *qtail)
2897{
2898        struct softnet_data *sd;
2899        unsigned long flags;
2900
2901        sd = &per_cpu(softnet_data, cpu);
2902
2903        local_irq_save(flags);
2904
2905        rps_lock(sd);
2906        if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2907                if (skb_queue_len(&sd->input_pkt_queue)) {
2908enqueue:
2909                        __skb_queue_tail(&sd->input_pkt_queue, skb);
2910                        input_queue_tail_incr_save(sd, qtail);
2911                        rps_unlock(sd);
2912                        local_irq_restore(flags);
2913                        return NET_RX_SUCCESS;
2914                }
2915
2916                /* Schedule NAPI for the backlog device.
2917                 * We can use a non-atomic operation since we own the queue lock
2918                 */
2919                if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2920                        if (!rps_ipi_queued(sd))
2921                                ____napi_schedule(sd, &sd->backlog);
2922                }
2923                goto enqueue;
2924        }
2925
2926        sd->dropped++;
2927        rps_unlock(sd);
2928
2929        local_irq_restore(flags);
2930
2931        atomic_long_inc(&skb->dev->rx_dropped);
2932        kfree_skb(skb);
2933        return NET_RX_DROP;
2934}
2935
2936/**
2937 *      netif_rx        -       post buffer to the network code
2938 *      @skb: buffer to post
2939 *
2940 *      This function receives a packet from a device driver and queues it for
2941 *      the upper (protocol) levels to process.  It always succeeds. The buffer
2942 *      may be dropped during processing for congestion control or by the
2943 *      protocol layers.
2944 *
2945 *      return values:
2946 *      NET_RX_SUCCESS  (no congestion)
2947 *      NET_RX_DROP     (packet was dropped)
2948 *
2949 */
2950
2951int netif_rx(struct sk_buff *skb)
2952{
2953        int ret;
2954
2955        /* if netpoll wants it, pretend we never saw it */
2956        if (netpoll_rx(skb))
2957                return NET_RX_DROP;
2958
2959        net_timestamp_check(netdev_tstamp_prequeue, skb);
2960
2961        trace_netif_rx(skb);
2962#ifdef CONFIG_RPS
2963        if (static_key_false(&rps_needed)) {
2964                struct rps_dev_flow voidflow, *rflow = &voidflow;
2965                int cpu;
2966
2967                preempt_disable();
2968                rcu_read_lock();
2969
2970                cpu = get_rps_cpu(skb->dev, skb, &rflow);
2971                if (cpu < 0)
2972                        cpu = smp_processor_id();
2973
2974                ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2975
2976                rcu_read_unlock();
2977                preempt_enable();
2978        } else
2979#endif
2980        {
2981                unsigned int qtail;
2982                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2983                put_cpu();
2984        }
2985        return ret;
2986}
2987EXPORT_SYMBOL(netif_rx);
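
/*
 * Illustrative sketch (editorial example, not part of the original file):
 * the smallest possible non-NAPI receive path: copy the frame out of the
 * hardware, set the protocol with eth_type_trans() and hand it to
 * netif_rx().  NAPI drivers would call netif_receive_skb() from their poll
 * handler instead.  my_rx_one and its arguments are assumptions.
 */
static void my_rx_one(struct net_device *dev, const void *data,
		      unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);			/* may be called from hard irq context */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}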
2988
2989int netif_rx_ni(struct sk_buff *skb)
2990{
2991        int err;
2992
2993        preempt_disable();
2994        err = netif_rx(skb);
2995        if (local_softirq_pending())
2996                do_softirq();
2997        preempt_enable();
2998
2999        return err;
3000}
3001EXPORT_SYMBOL(netif_rx_ni);
3002
3003static void net_tx_action(struct softirq_action *h)
3004{
3005        struct softnet_data *sd = &__get_cpu_var(softnet_data);
3006
3007        if (sd->completion_queue) {
3008                struct sk_buff *clist;
3009
3010                local_irq_disable();
3011                clist = sd->completion_queue;
3012                sd->completion_queue = NULL;
3013                local_irq_enable();
3014
3015                while (clist) {
3016                        struct sk_buff *skb = clist;
3017                        clist = clist->next;
3018
3019                        WARN_ON(atomic_read(&skb->users));
3020                        trace_kfree_skb(skb, net_tx_action);
3021                        __kfree_skb(skb);
3022                }
3023        }
3024
3025        if (sd->output_queue) {
3026                struct Qdisc *head;
3027
3028                local_irq_disable();
3029                head = sd->output_queue;
3030                sd->output_queue = NULL;
3031                sd->output_queue_tailp = &sd->output_queue;
3032                local_irq_enable();
3033
3034                while (head) {
3035                        struct Qdisc *q = head;
3036                        spinlock_t *root_lock;
3037
3038                        head = head->next_sched;
3039
3040                        root_lock = qdisc_lock(q);
3041                        if (spin_trylock(root_lock)) {
3042                                smp_mb__before_clear_bit();
3043                                clear_bit(__QDISC_STATE_SCHED,
3044                                          &q->state);
3045                                qdisc_run(q);
3046                                spin_unlock(root_lock);
3047                        } else {
3048                                if (!test_bit(__QDISC_STATE_DEACTIVATED,
3049                                              &q->state)) {
3050                                        __netif_reschedule(q);
3051                                } else {
3052                                        smp_mb__before_clear_bit();
3053                                        clear_bit(__QDISC_STATE_SCHED,
3054                                                  &q->state);
3055                                }
3056                        }
3057                }
3058        }
3059}
3060
3061#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3062    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3063/* This hook is defined here for ATM LANE */
3064int (*br_fdb_test_addr_hook)(struct net_device *dev,
3065                             unsigned char *addr) __read_mostly;
3066EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3067#endif
3068
3069#ifdef CONFIG_NET_CLS_ACT
3070/* TODO: Maybe we should just force sch_ingress to be compiled in
3071 * when CONFIG_NET_CLS_ACT is? Otherwise we currently pay for a few
3072 * useless instructions (a compare and two extra stores) when sch_ingress
3073 * is not built but CONFIG_NET_CLS_ACT is.
3074 * NOTE: This doesn't remove any functionality; if you don't have
3075 * the ingress scheduler, you just can't add policies on ingress.
3076 *
3077 */
3078static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3079{
3080        struct net_device *dev = skb->dev;
3081        u32 ttl = G_TC_RTTL(skb->tc_verd);
3082        int result = TC_ACT_OK;
3083        struct Qdisc *q;
3084
3085        if (unlikely(MAX_RED_LOOP < ttl++)) {
3086                net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3087                                     skb->skb_iif, dev->ifindex);
3088                return TC_ACT_SHOT;
3089        }
3090
3091        skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3092        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3093
3094        q = rxq->qdisc;
3095        if (q != &noop_qdisc) {
3096                spin_lock(qdisc_lock(q));
3097                if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3098                        result = qdisc_enqueue_root(skb, q);
3099                spin_unlock(qdisc_lock(q));
3100        }
3101
3102        return result;
3103}
3104
3105static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3106                                         struct packet_type **pt_prev,
3107                                         int *ret, struct net_device *orig_dev)
3108{
3109        struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3110
3111        if (!rxq || rxq->qdisc == &noop_qdisc)
3112                goto out;
3113
3114        if (*pt_prev) {
3115                *ret = deliver_skb(skb, *pt_prev, orig_dev);
3116                *pt_prev = NULL;
3117        }
3118
3119        switch (ing_filter(skb, rxq)) {
3120        case TC_ACT_SHOT:
3121        case TC_ACT_STOLEN:
3122                kfree_skb(skb);
3123                return NULL;
3124        }
3125
3126out:
3127        skb->tc_verd = 0;
3128        return skb;
3129}
3130#endif
3131
3132/**
3133 *      netdev_rx_handler_register - register receive handler
3134 *      @dev: device to register a handler for
3135 *      @rx_handler: receive handler to register
3136 *      @rx_handler_data: data pointer that is used by rx handler
3137 *
3138 *      Register a receive handler for a device. This handler will then be
3139 *      called from __netif_receive_skb. A negative errno code is returned
3140 *      on a failure.
3141 *
3142 *      The caller must hold the rtnl_mutex.
3143 *
3144 *      For a general description of rx_handler, see enum rx_handler_result.
3145 */
3146int netdev_rx_handler_register(struct net_device *dev,
3147                               rx_handler_func_t *rx_handler,
3148                               void *rx_handler_data)
3149{
3150        ASSERT_RTNL();
3151
3152        if (dev->rx_handler)
3153                return -EBUSY;
3154
3155        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3156        rcu_assign_pointer(dev->rx_handler, rx_handler);
3157
3158        return 0;
3159}
3160EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3161
3162/**
3163 *      netdev_rx_handler_unregister - unregister receive handler
3164 *      @dev: device to unregister a handler from
3165 *
3166 *      Unregister a receive handler from a device.
3167 *
3168 *      The caller must hold the rtnl_mutex.
3169 */
3170void netdev_rx_handler_unregister(struct net_device *dev)
3171{
3172
3173        ASSERT_RTNL();
3174        RCU_INIT_POINTER(dev->rx_handler, NULL);
3175        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3176}
3177EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
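/*
 * Illustrative sketch (not part of this file): roughly how a bridge- or
 * bonding-style module might use the two helpers above.  The names
 * my_port, my_port_attach and my_port_wants_frame are hypothetical.
 */
#if 0
static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (my_port_wants_frame(port, skb)) {	/* hypothetical test */
		skb->dev = port->upper_dev;
		*pskb = skb;
		return RX_HANDLER_ANOTHER;	/* re-run __netif_receive_skb() */
	}
	return RX_HANDLER_PASS;			/* normal delivery continues */
}

static int my_port_attach(struct net_device *dev, struct my_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_handle_frame, port);
	rtnl_unlock();
	return err;
}

static void my_port_detach(struct net_device *dev)
{
	rtnl_lock();
	netdev_rx_handler_unregister(dev);
	rtnl_unlock();
}
#endif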
3178
3179/*
3180 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3181 * the special handling of PFMEMALLOC skbs.
3182 */
3183static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3184{
3185        switch (skb->protocol) {
3186        case __constant_htons(ETH_P_ARP):
3187        case __constant_htons(ETH_P_IP):
3188        case __constant_htons(ETH_P_IPV6):
3189        case __constant_htons(ETH_P_8021Q):
3190                return true;
3191        default:
3192                return false;
3193        }
3194}
3195
3196static int __netif_receive_skb(struct sk_buff *skb)
3197{
3198        struct packet_type *ptype, *pt_prev;
3199        rx_handler_func_t *rx_handler;
3200        struct net_device *orig_dev;
3201        struct net_device *null_or_dev;
3202        bool deliver_exact = false;
3203        int ret = NET_RX_DROP;
3204        __be16 type;
3205        unsigned long pflags = current->flags;
3206
3207        net_timestamp_check(!netdev_tstamp_prequeue, skb);
3208
3209        trace_netif_receive_skb(skb);
3210
3211        /*
3212         * PFMEMALLOC skbs are special, they should
3213         * - be delivered to SOCK_MEMALLOC sockets only
3214         * - stay away from userspace
3215         * - have bounded memory usage
3216         *
3217         * Use PF_MEMALLOC as this saves us from propagating the allocation
3218         * context down to all allocation sites.
3219         */
3220        if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3221                current->flags |= PF_MEMALLOC;
3222
3223        /* if we've gotten here through NAPI, check netpoll */
3224        if (netpoll_receive_skb(skb))
3225                goto out;
3226
3227        orig_dev = skb->dev;
3228
3229        skb_reset_network_header(skb);
3230        skb_reset_transport_header(skb);
3231        skb_reset_mac_len(skb);
3232
3233        pt_prev = NULL;
3234
3235        rcu_read_lock();
3236
3237another_round:
3238        skb->skb_iif = skb->dev->ifindex;
3239
3240        __this_cpu_inc(softnet_data.processed);
3241
3242        if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3243                skb = vlan_untag(skb);
3244                if (unlikely(!skb))
3245                        goto unlock;
3246        }
3247
3248#ifdef CONFIG_NET_CLS_ACT
3249        if (skb->tc_verd & TC_NCLS) {
3250                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3251                goto ncls;
3252        }
3253#endif
3254
3255        if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3256                goto skip_taps;
3257
3258        list_for_each_entry_rcu(ptype, &ptype_all, list) {
3259                if (!ptype->dev || ptype->dev == skb->dev) {
3260                        if (pt_prev)
3261                                ret = deliver_skb(skb, pt_prev, orig_dev);
3262                        pt_prev = ptype;
3263                }
3264        }
3265
3266skip_taps:
3267#ifdef CONFIG_NET_CLS_ACT
3268        skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3269        if (!skb)
3270                goto unlock;
3271ncls:
3272#endif
3273
3274        if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3275                                && !skb_pfmemalloc_protocol(skb))
3276                goto drop;
3277
3278        rx_handler = rcu_dereference(skb->dev->rx_handler);
3279        if (vlan_tx_tag_present(skb)) {
3280                if (pt_prev) {
3281                        ret = deliver_skb(skb, pt_prev, orig_dev);
3282                        pt_prev = NULL;
3283                }
3284                if (vlan_do_receive(&skb, !rx_handler))
3285                        goto another_round;
3286                else if (unlikely(!skb))
3287                        goto unlock;
3288        }
3289
3290        if (rx_handler) {
3291                if (pt_prev) {
3292                        ret = deliver_skb(skb, pt_prev, orig_dev);
3293                        pt_prev = NULL;
3294                }
3295                switch (rx_handler(&skb)) {
3296                case RX_HANDLER_CONSUMED:
3297                        goto unlock;
3298                case RX_HANDLER_ANOTHER:
3299                        goto another_round;
3300                case RX_HANDLER_EXACT:
3301                        deliver_exact = true;	/* fall through */
3302                case RX_HANDLER_PASS:
3303                        break;
3304                default:
3305                        BUG();
3306                }
3307        }
3308
3309        /* deliver only exact match when indicated */
3310        null_or_dev = deliver_exact ? skb->dev : NULL;
3311
3312        type = skb->protocol;
3313        list_for_each_entry_rcu(ptype,
3314                        &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3315                if (ptype->type == type &&
3316                    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3317                     ptype->dev == orig_dev)) {
3318                        if (pt_prev)
3319                                ret = deliver_skb(skb, pt_prev, orig_dev);
3320                        pt_prev = ptype;
3321                }
3322        }
3323
3324        if (pt_prev) {
3325                if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3326                        goto drop;
3327                else
3328                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3329        } else {
3330drop:
3331                atomic_long_inc(&skb->dev->rx_dropped);
3332                kfree_skb(skb);
3333                /* Jamal, now you will not be able to escape explaining
3334                 * to me how you were going to use this. :-)
3335                 */
3336                ret = NET_RX_DROP;
3337        }
3338
3339unlock:
3340        rcu_read_unlock();
3341out:
3342        tsk_restore_flags(current, pflags, PF_MEMALLOC);
3343        return ret;
3344}
3345
3346/**
3347 *      netif_receive_skb - process receive buffer from network
3348 *      @skb: buffer to process
3349 *
3350 *      netif_receive_skb() is the main receive data processing function.
3351 *      It always succeeds. The buffer may be dropped during processing
3352 *      for congestion control or by the protocol layers.
3353 *
3354 *      This function may only be called from softirq context and interrupts
3355 *      should be enabled.
3356 *
3357 *      Return values (usually ignored):
3358 *      NET_RX_SUCCESS: no congestion
3359 *      NET_RX_DROP: packet was dropped
3360 */
3361int netif_receive_skb(struct sk_buff *skb)
3362{
3363        net_timestamp_check(netdev_tstamp_prequeue, skb);
3364
3365        if (skb_defer_rx_timestamp(skb))
3366                return NET_RX_SUCCESS;
3367
3368#ifdef CONFIG_RPS
3369        if (static_key_false(&rps_needed)) {
3370                struct rps_dev_flow voidflow, *rflow = &voidflow;
3371                int cpu, ret;
3372
3373                rcu_read_lock();
3374
3375                cpu = get_rps_cpu(skb->dev, skb, &rflow);
3376
3377                if (cpu >= 0) {
3378                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3379                        rcu_read_unlock();
3380                        return ret;
3381                }
3382                rcu_read_unlock();
3383        }
3384#endif
3385        return __netif_receive_skb(skb);
3386}
3387EXPORT_SYMBOL(netif_receive_skb);
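/*
 * Illustrative sketch (not part of this file): the usual way a NAPI
 * driver hands a received frame to the stack from its poll routine.
 * my_build_skb() is a hypothetical helper that wraps the DMA buffer
 * in an skb.
 */
#if 0
static void my_rx_one(struct net_device *dev, void *buf, unsigned int len)
{
	struct sk_buff *skb = my_build_skb(dev, buf, len);	/* hypothetical */

	if (unlikely(!skb))
		return;
	skb->protocol = eth_type_trans(skb, dev);
	/* Return value is usually ignored; the stack may still drop the skb. */
	netif_receive_skb(skb);
}
#endif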
3388
3389/* Network device is going away; flush any packets still pending.
3390 * Called with irqs disabled.
3391 */
3392static void flush_backlog(void *arg)
3393{
3394        struct net_device *dev = arg;
3395        struct softnet_data *sd = &__get_cpu_var(softnet_data);
3396        struct sk_buff *skb, *tmp;
3397
3398        rps_lock(sd);
3399        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3400                if (skb->dev == dev) {
3401                        __skb_unlink(skb, &sd->input_pkt_queue);
3402                        kfree_skb(skb);
3403                        input_queue_head_incr(sd);
3404                }
3405        }
3406        rps_unlock(sd);
3407
3408        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3409                if (skb->dev == dev) {
3410                        __skb_unlink(skb, &sd->process_queue);
3411                        kfree_skb(skb);
3412                        input_queue_head_incr(sd);
3413                }
3414        }
3415}
3416
3417static int napi_gro_complete(struct sk_buff *skb)
3418{
3419        struct packet_type *ptype;
3420        __be16 type = skb->protocol;
3421        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3422        int err = -ENOENT;
3423
3424        if (NAPI_GRO_CB(skb)->count == 1) {
3425                skb_shinfo(skb)->gso_size = 0;
3426                goto out;
3427        }
3428
3429        rcu_read_lock();
3430        list_for_each_entry_rcu(ptype, head, list) {
3431                if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3432                        continue;
3433
3434                err = ptype->gro_complete(skb);
3435                break;
3436        }
3437        rcu_read_unlock();
3438
3439        if (err) {
3440                WARN_ON(&ptype->list == head);
3441                kfree_skb(skb);
3442                return NET_RX_SUCCESS;
3443        }
3444
3445out:
3446        return netif_receive_skb(skb);
3447}
3448
3449inline void napi_gro_flush(struct napi_struct *napi)
3450{
3451        struct sk_buff *skb, *next;
3452
3453        for (skb = napi->gro_list; skb; skb = next) {
3454                next = skb->next;
3455                skb->next = NULL;
3456                napi_gro_complete(skb);
3457        }
3458
3459        napi->gro_count = 0;
3460        napi->gro_list = NULL;
3461}
3462EXPORT_SYMBOL(napi_gro_flush);
3463
3464enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3465{
3466        struct sk_buff **pp = NULL;
3467        struct packet_type *ptype;
3468        __be16 type = skb->protocol;
3469        struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3470        int same_flow;
3471        int mac_len;
3472        enum gro_result ret;
3473
3474        if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3475                goto normal;
3476
3477        if (skb_is_gso(skb) || skb_has_frag_list(skb))
3478                goto normal;
3479
3480        rcu_read_lock();
3481        list_for_each_entry_rcu(ptype, head, list) {
3482                if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3483                        continue;
3484
3485                skb_set_network_header(skb, skb_gro_offset(skb));
3486                mac_len = skb->network_header - skb->mac_header;
3487                skb->mac_len = mac_len;
3488                NAPI_GRO_CB(skb)->same_flow = 0;
3489                NAPI_GRO_CB(skb)->flush = 0;
3490                NAPI_GRO_CB(skb)->free = 0;
3491
3492                pp = ptype->gro_receive(&napi->gro_list, skb);
3493                break;
3494        }
3495        rcu_read_unlock();
3496
3497        if (&ptype->list == head)
3498                goto normal;
3499
3500        same_flow = NAPI_GRO_CB(skb)->same_flow;
3501        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3502
3503        if (pp) {
3504                struct sk_buff *nskb = *pp;
3505
3506                *pp = nskb->next;
3507                nskb->next = NULL;
3508                napi_gro_complete(nskb);
3509                napi->gro_count--;
3510        }
3511
3512        if (same_flow)
3513                goto ok;
3514
3515        if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3516                goto normal;
3517
3518        napi->gro_count++;
3519        NAPI_GRO_CB(skb)->count = 1;
3520        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3521        skb->next = napi->gro_list;
3522        napi->gro_list = skb;
3523        ret = GRO_HELD;
3524
3525pull:
3526        if (skb_headlen(skb) < skb_gro_offset(skb)) {
3527                int grow = skb_gro_offset(skb) - skb_headlen(skb);
3528
3529                BUG_ON(skb->end - skb->tail < grow);
3530
3531                memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3532
3533                skb->tail += grow;
3534                skb->data_len -= grow;
3535
3536                skb_shinfo(skb)->frags[0].page_offset += grow;
3537                skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3538
3539                if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3540                        skb_frag_unref(skb, 0);
3541                        memmove(skb_shinfo(skb)->frags,
3542                                skb_shinfo(skb)->frags + 1,
3543                                --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3544                }
3545        }
3546
3547ok:
3548        return ret;
3549
3550normal:
3551        ret = GRO_NORMAL;
3552        goto pull;
3553}
3554EXPORT_SYMBOL(dev_gro_receive);
3555
3556static inline gro_result_t
3557__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3558{
3559        struct sk_buff *p;
3560        unsigned int maclen = skb->dev->hard_header_len;
3561
3562        for (p = napi->gro_list; p; p = p->next) {
3563                unsigned long diffs;
3564
3565                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3566                diffs |= p->vlan_tci ^ skb->vlan_tci;
3567                if (maclen == ETH_HLEN)
3568                        diffs |= compare_ether_header(skb_mac_header(p),
3569                                                      skb_gro_mac_header(skb));
3570                else if (!diffs)
3571                        diffs = memcmp(skb_mac_header(p),
3572                                       skb_gro_mac_header(skb),
3573                                       maclen);
3574                NAPI_GRO_CB(p)->same_flow = !diffs;
3575                NAPI_GRO_CB(p)->flush = 0;
3576        }
3577
3578        return dev_gro_receive(napi, skb);
3579}
3580
3581gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3582{
3583        switch (ret) {
3584        case GRO_NORMAL:
3585                if (netif_receive_skb(skb))
3586                        ret = GRO_DROP;
3587                break;
3588
3589        case GRO_DROP:
3590                kfree_skb(skb);
3591                break;
3592
3593        case GRO_MERGED_FREE:
3594                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3595                        kmem_cache_free(skbuff_head_cache, skb);
3596                else
3597                        __kfree_skb(skb);
3598                break;
3599
3600        case GRO_HELD:
3601        case GRO_MERGED:
3602                break;
3603        }
3604
3605        return ret;
3606}
3607EXPORT_SYMBOL(napi_skb_finish);
3608
3609void skb_gro_reset_offset(struct sk_buff *skb)
3610{
3611        NAPI_GRO_CB(skb)->data_offset = 0;
3612        NAPI_GRO_CB(skb)->frag0 = NULL;
3613        NAPI_GRO_CB(skb)->frag0_len = 0;
3614
3615        if (skb->mac_header == skb->tail &&
3616            !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3617                NAPI_GRO_CB(skb)->frag0 =
3618                        skb_frag_address(&skb_shinfo(skb)->frags[0]);
3619                NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3620        }
3621}
3622EXPORT_SYMBOL(skb_gro_reset_offset);
3623
3624gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3625{
3626        skb_gro_reset_offset(skb);
3627
3628        return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3629}
3630EXPORT_SYMBOL(napi_gro_receive);
3631
3632static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3633{
3634        __skb_pull(skb, skb_headlen(skb));
3635        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3636        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3637        skb->vlan_tci = 0;
3638        skb->dev = napi->dev;
3639        skb->skb_iif = 0;
3640
3641        napi->skb = skb;
3642}
3643
3644struct sk_buff *napi_get_frags(struct napi_struct *napi)
3645{
3646        struct sk_buff *skb = napi->skb;
3647
3648        if (!skb) {
3649                skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3650                if (skb)
3651                        napi->skb = skb;
3652        }
3653        return skb;
3654}
3655EXPORT_SYMBOL(napi_get_frags);
3656
3657gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3658                               gro_result_t ret)
3659{
3660        switch (ret) {
3661        case GRO_NORMAL:
3662        case GRO_HELD:
3663                skb->protocol = eth_type_trans(skb, skb->dev);
3664
3665                if (ret == GRO_HELD)
3666                        skb_gro_pull(skb, -ETH_HLEN);
3667                else if (netif_receive_skb(skb))
3668                        ret = GRO_DROP;
3669                break;
3670
3671        case GRO_DROP:
3672        case GRO_MERGED_FREE:
3673                napi_reuse_skb(napi, skb);
3674                break;
3675
3676        case GRO_MERGED:
3677                break;
3678        }
3679
3680        return ret;
3681}
3682EXPORT_SYMBOL(napi_frags_finish);
3683
3684static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3685{
3686        struct sk_buff *skb = napi->skb;
3687        struct ethhdr *eth;
3688        unsigned int hlen;
3689        unsigned int off;
3690
3691        napi->skb = NULL;
3692
3693        skb_reset_mac_header(skb);
3694        skb_gro_reset_offset(skb);
3695
3696        off = skb_gro_offset(skb);
3697        hlen = off + sizeof(*eth);
3698        eth = skb_gro_header_fast(skb, off);
3699        if (skb_gro_header_hard(skb, hlen)) {
3700                eth = skb_gro_header_slow(skb, hlen, off);
3701                if (unlikely(!eth)) {
3702                        napi_reuse_skb(napi, skb);
3703                        skb = NULL;
3704                        goto out;
3705                }
3706        }
3707
3708        skb_gro_pull(skb, sizeof(*eth));
3709
3710        /*
3711         * This works because the only protocols we care about don't require
3712         * special handling.  We'll fix it up properly at the end.
3713         */
3714        skb->protocol = eth->h_proto;
3715
3716out:
3717        return skb;
3718}
3719
3720gro_result_t napi_gro_frags(struct napi_struct *napi)
3721{
3722        struct sk_buff *skb = napi_frags_skb(napi);
3723
3724        if (!skb)
3725                return GRO_DROP;
3726
3727        return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3728}
3729EXPORT_SYMBOL(napi_gro_frags);
3730
3731/*
3732 * net_rps_action sends any pending IPIs for RPS.
3733 * Note: called with local irq disabled, but exits with local irq enabled.
3734 */
3735static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3736{
3737#ifdef CONFIG_RPS
3738        struct softnet_data *remsd = sd->rps_ipi_list;
3739
3740        if (remsd) {
3741                sd->rps_ipi_list = NULL;
3742
3743                local_irq_enable();
3744
3745                /* Send pending IPIs to kick RPS processing on remote CPUs. */
3746                while (remsd) {
3747                        struct softnet_data *next = remsd->rps_ipi_next;
3748
3749                        if (cpu_online(remsd->cpu))
3750                                __smp_call_function_single(remsd->cpu,
3751                                                           &remsd->csd, 0);
3752                        remsd = next;
3753                }
3754        } else
3755#endif
3756                local_irq_enable();
3757}
3758
3759static int process_backlog(struct napi_struct *napi, int quota)
3760{
3761        int work = 0;
3762        struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3763
3764#ifdef CONFIG_RPS
3765        /* Check if we have pending IPIs; it is better to send them now
3766         * rather than waiting for net_rx_action() to end.
3767         */
3768        if (sd->rps_ipi_list) {
3769                local_irq_disable();
3770                net_rps_action_and_irq_enable(sd);
3771        }
3772#endif
3773        napi->weight = weight_p;
3774        local_irq_disable();
3775        while (work < quota) {
3776                struct sk_buff *skb;
3777                unsigned int qlen;
3778
3779                while ((skb = __skb_dequeue(&sd->process_queue))) {
3780                        local_irq_enable();
3781                        __netif_receive_skb(skb);
3782                        local_irq_disable();
3783                        input_queue_head_incr(sd);
3784                        if (++work >= quota) {
3785                                local_irq_enable();
3786                                return work;
3787                        }
3788                }
3789
3790                rps_lock(sd);
3791                qlen = skb_queue_len(&sd->input_pkt_queue);
3792                if (qlen)
3793                        skb_queue_splice_tail_init(&sd->input_pkt_queue,
3794                                                   &sd->process_queue);
3795
3796                if (qlen < quota - work) {
3797                        /*
3798                         * Inline a custom version of __napi_complete().
3799                         * Only the current cpu owns and manipulates this napi,
3800                         * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3801                         * so we can use a plain write instead of clear_bit()
3802                         * and we don't need an smp_mb() memory barrier.
3803                         */
3804                        list_del(&napi->poll_list);
3805                        napi->state = 0;
3806
3807                        quota = work + qlen;
3808                }
3809                rps_unlock(sd);
3810        }
3811        local_irq_enable();
3812
3813        return work;
3814}
3815
3816/**
3817 * __napi_schedule - schedule for receive
3818 * @n: entry to schedule
3819 *
3820 * The entry's receive function will be scheduled to run
3821 */
3822void __napi_schedule(struct napi_struct *n)
3823{
3824        unsigned long flags;
3825
3826        local_irq_save(flags);
3827        ____napi_schedule(&__get_cpu_var(softnet_data), n);
3828        local_irq_restore(flags);
3829}
3830EXPORT_SYMBOL(__napi_schedule);
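/*
 * Illustrative sketch (not part of this file): a typical RX interrupt
 * handler arming NAPI.  Most drivers use napi_schedule(), which wraps
 * napi_schedule_prep() and __napi_schedule(); the open-coded form is
 * shown here.  my_adapter and my_disable_rx_irq() are hypothetical.
 */
#if 0
static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_adapter *adapter = dev_id;

	if (napi_schedule_prep(&adapter->napi)) {
		my_disable_rx_irq(adapter);	/* hypothetical: mask RX irqs */
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
#endif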
3831
3832void __napi_complete(struct napi_struct *n)
3833{
3834        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3835        BUG_ON(n->gro_list);
3836
3837        list_del(&n->poll_list);
3838        smp_mb__before_clear_bit();
3839        clear_bit(NAPI_STATE_SCHED, &n->state);
3840}
3841EXPORT_SYMBOL(__napi_complete);
3842
3843void napi_complete(struct napi_struct *n)
3844{
3845        unsigned long flags;
3846
3847        /*
3848         * don't let napi dequeue from the cpu poll list
3849         * just in case it's running on a different cpu
3850         */
3851        if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3852                return;
3853
3854        napi_gro_flush(n);
3855        local_irq_save(flags);
3856        __napi_complete(n);
3857        local_irq_restore(flags);
3858}
3859EXPORT_SYMBOL(napi_complete);
3860
3861void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3862                    int (*poll)(struct napi_struct *, int), int weight)
3863{
3864        INIT_LIST_HEAD(&napi->poll_list);
3865        napi->gro_count = 0;
3866        napi->gro_list = NULL;
3867        napi->skb = NULL;
3868        napi->poll = poll;
3869        napi->weight = weight;
3870        list_add(&napi->dev_list, &dev->napi_list);
3871        napi->dev = dev;
3872#ifdef CONFIG_NETPOLL
3873        spin_lock_init(&napi->poll_lock);
3874        napi->poll_owner = -1;
3875#endif
3876        set_bit(NAPI_STATE_SCHED, &napi->state);
3877}
3878EXPORT_SYMBOL(netif_napi_add);
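/*
 * Illustrative sketch (not part of this file): registering a poll
 * routine and honouring the weight/napi_complete() rules that
 * net_rx_action() below relies on.  my_adapter, my_clean_rx() and
 * my_enable_rx_irq() are hypothetical.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *adapter = container_of(napi, struct my_adapter, napi);
	int work_done = my_clean_rx(adapter, budget);	/* hypothetical */

	/* Only if less than the full weight was consumed may the driver
	 * complete NAPI and re-enable its interrupt.
	 */
	if (work_done < budget) {
		napi_complete(napi);
		my_enable_rx_irq(adapter);	/* hypothetical */
	}
	return work_done;
}

static void my_napi_setup(struct net_device *dev, struct my_adapter *adapter)
{
	netif_napi_add(dev, &adapter->napi, my_poll, 64);
}
#endif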
3879
3880void netif_napi_del(struct napi_struct *napi)
3881{
3882        struct sk_buff *skb, *next;
3883
3884        list_del_init(&napi->dev_list);
3885        napi_free_frags(napi);
3886
3887        for (skb = napi->gro_list; skb; skb = next) {
3888                next = skb->next;
3889                skb->next = NULL;
3890                kfree_skb(skb);
3891        }
3892
3893        napi->gro_list = NULL;
3894        napi->gro_count = 0;
3895}
3896EXPORT_SYMBOL(netif_napi_del);
3897
3898static void net_rx_action(struct softirq_action *h)
3899{
3900        struct softnet_data *sd = &__get_cpu_var(softnet_data);
3901        unsigned long time_limit = jiffies + 2;
3902        int budget = netdev_budget;
3903        void *have;
3904
3905        local_irq_disable();
3906
3907        while (!list_empty(&sd->poll_list)) {
3908                struct napi_struct *n;
3909                int work, weight;
3910
3911                /* If the softirq window is exhausted then punt.
3912                 * Allow this to run for 2 jiffies, which gives
3913                 * an average latency of 1.5/HZ.
3914                 */
3915                if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3916                        goto softnet_break;
3917
3918                local_irq_enable();
3919
3920                /* Even though interrupts have been re-enabled, this
3921                 * access is safe because interrupts can only add new
3922                 * entries to the tail of this list, and only ->poll()
3923                 * calls can remove this head entry from the list.
3924                 */
3925                n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3926
3927                have = netpoll_poll_lock(n);
3928
3929                weight = n->weight;
3930
3931                /* This NAPI_STATE_SCHED test is for avoiding a race
3932                 * with netpoll's poll_napi().  Only the entity which
3933                 * obtains the lock and sees NAPI_STATE_SCHED set will
3934                 * actually make the ->poll() call.  Therefore we avoid
3935                 * accidentally calling ->poll() when NAPI is not scheduled.
3936                 */
3937                work = 0;
3938                if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3939                        work = n->poll(n, weight);
3940                        trace_napi_poll(n);
3941                }
3942
3943                WARN_ON_ONCE(work > weight);
3944
3945                budget -= work;
3946
3947                local_irq_disable();
3948
3949                /* Drivers must not modify the NAPI state if they
3950                 * consume the entire weight.  In such cases this code
3951                 * still "owns" the NAPI instance and therefore can
3952                 * move the instance around on the list at-will.
3953                 */
3954                if (unlikely(work == weight)) {
3955                        if (unlikely(napi_disable_pending(n))) {
3956                                local_irq_enable();
3957                                napi_complete(n);
3958                                local_irq_disable();
3959                        } else
3960                                list_move_tail(&n->poll_list, &sd->poll_list);
3961                }
3962
3963                netpoll_poll_unlock(have);
3964        }
3965out:
3966        net_rps_action_and_irq_enable(sd);
3967
3968#ifdef CONFIG_NET_DMA
3969        /*
3970         * There may not be any more sk_buffs coming right now, so push
3971         * any pending DMA copies to hardware
3972         */
3973        dma_issue_pending_all();
3974#endif
3975
3976        return;
3977
3978softnet_break:
3979        sd->time_squeeze++;
3980        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3981        goto out;
3982}
3983
3984static gifconf_func_t *gifconf_list[NPROTO];
3985
3986/**
3987 *      register_gifconf        -       register a SIOCGIF handler
3988 *      @family: Address family
3989 *      @gifconf: Function handler
3990 *
3991 *      Register protocol dependent address dumping routines. The handler
3992 *      that is passed must not be freed or reused until it has been replaced
3993 *      by another handler.
3994 */
3995int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3996{
3997        if (family >= NPROTO)
3998                return -EINVAL;
3999        gifconf_list[family] = gifconf;
4000        return 0;
4001}
4002EXPORT_SYMBOL(register_gifconf);
4003
4004
4005/*
4006 *      Map an interface index to its name (SIOCGIFNAME)
4007 */
4008
4009/*
4010 *      We need this ioctl for efficient implementation of the
4011 *      if_indextoname() function required by the IPv6 API.  Without
4012 *      it, we would have to search all the interfaces to find a
4013 *      match.  --pb
4014 */
4015
4016static int dev_ifname(struct net *net, struct ifreq __user *arg)
4017{
4018        struct net_device *dev;
4019        struct ifreq ifr;
4020
4021        /*
4022         *      Fetch the caller's info block.
4023         */
4024
4025        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4026                return -EFAULT;
4027
4028        rcu_read_lock();
4029        dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4030        if (!dev) {
4031                rcu_read_unlock();
4032                return -ENODEV;
4033        }
4034
4035        strcpy(ifr.ifr_name, dev->name);
4036        rcu_read_unlock();
4037
4038        if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4039                return -EFAULT;
4040        return 0;
4041}
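/*
 * Illustrative sketch (not part of this file): the userspace side of
 * the SIOCGIFNAME ioctl handled above; this is essentially what
 * if_indextoname() does.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("index %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return 0;
}
#endif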
4042
4043/*
4044 *      Perform a SIOCGIFCONF call. This structure will change
4045 *      size eventually, and there is nothing I can do about it.
4046 *      Thus we will need a 'compatibility mode'.
4047 */
4048
4049static int dev_ifconf(struct net *net, char __user *arg)
4050{
4051        struct ifconf ifc;
4052        struct net_device *dev;
4053        char __user *pos;
4054        int len;
4055        int total;
4056        int i;
4057
4058        /*
4059         *      Fetch the caller's info block.
4060         */
4061
4062        if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4063                return -EFAULT;
4064
4065        pos = ifc.ifc_buf;
4066        len = ifc.ifc_len;
4067
4068        /*
4069         *      Loop over the interfaces, and write an info block for each.
4070         */
4071
4072        total = 0;
4073        for_each_netdev(net, dev) {
4074                for (i = 0; i < NPROTO; i++) {
4075                        if (gifconf_list[i]) {
4076                                int done;
4077                                if (!pos)
4078                                        done = gifconf_list[i](dev, NULL, 0);
4079                                else
4080                                        done = gifconf_list[i](dev, pos + total,
4081                                                               len - total);
4082                                if (done < 0)
4083                                        return -EFAULT;
4084                                total += done;
4085                        }
4086                }
4087        }
4088
4089        /*
4090         *      All done.  Write the updated control block back to the caller.
4091         */
4092        ifc.ifc_len = total;
4093
4094        /*
4095         *      Both BSD and Solaris return 0 here, so we do too.
4096         */
4097        return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4098}
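/*
 * Illustrative sketch (not part of this file): a userspace SIOCGIFCONF
 * call against the handler above.  Note that only interfaces with an
 * address configured for a registered family (typically IPv4) are
 * reported.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static void list_interfaces(void)
{
	struct ifreq reqs[32];
	struct ifconf ifc;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int i, n;

	if (fd < 0)
		return;
	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
		n = ifc.ifc_len / sizeof(struct ifreq);
		for (i = 0; i < n; i++)
			printf("%s\n", reqs[i].ifr_name);
	}
	close(fd);
}
#endif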
4099
4100#ifdef CONFIG_PROC_FS
4101
4102#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4103
4104#define get_bucket(x) ((x) >> BUCKET_SPACE)
4105#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4106#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4107
4108static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4109{
4110        struct net *net = seq_file_net(seq);
4111        struct net_device *dev;
4112        struct hlist_node *p;
4113        struct hlist_head *h;
4114        unsigned int count = 0, offset = get_offset(*pos);
4115
4116        h = &net->dev_name_head[get_bucket(*pos)];
4117        hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4118                if (++count == offset)
4119                        return dev;
4120        }
4121
4122        return NULL;
4123}
4124
4125static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4126{
4127        struct net_device *dev;
4128        unsigned int bucket;
4129
4130        do {
4131                dev = dev_from_same_bucket(seq, pos);
4132                if (dev)
4133                        return dev;
4134
4135                bucket = get_bucket(*pos) + 1;
4136                *pos = set_bucket_offset(bucket, 1);
4137        } while (bucket < NETDEV_HASHENTRIES);
4138
4139        return NULL;
4140}
4141
4142/*
4143 *      This is invoked by the /proc filesystem handler to display a device
4144 *      in detail.
4145 */
4146void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4147        __acquires(RCU)
4148{
4149        rcu_read_lock();
4150        if (!*pos)
4151                return SEQ_START_TOKEN;
4152
4153        if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4154                return NULL;
4155
4156        return dev_from_bucket(seq, pos);
4157}
4158
4159void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4160{
4161        ++*pos;
4162        return dev_from_bucket(seq, pos);
4163}
4164
4165void dev_seq_stop(struct seq_file *seq, void *v)
4166        __releases(RCU)
4167{
4168        rcu_read_unlock();
4169}
4170
4171static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4172{
4173        struct rtnl_link_stats64 temp;
4174        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4175
4176        seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4177                   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4178                   dev->name, stats->rx_bytes, stats->rx_packets,
4179                   stats->rx_errors,
4180                   stats->rx_dropped + stats->rx_missed_errors,
4181                   stats->rx_fifo_errors,
4182                   stats->rx_length_errors + stats->rx_over_errors +
4183                    stats->rx_crc_errors + stats->rx_frame_errors,
4184                   stats->rx_compressed, stats->multicast,
4185                   stats->tx_bytes, stats->tx_packets,
4186                   stats->tx_errors, stats->tx_dropped,
4187                   stats->tx_fifo_errors, stats->collisions,
4188                   stats->tx_carrier_errors +
4189                    stats->tx_aborted_errors +
4190                    stats->tx_window_errors +
4191                    stats->tx_heartbeat_errors,
4192                   stats->tx_compressed);
4193}
4194
4195/*
4196 *      Called from the PROCfs module. This now uses the new arbitrary-sized
4197 *      /proc/net interface to create /proc/net/dev.
4198 */
4199static int dev_seq_show(struct seq_file *seq, void *v)
4200{
4201        if (v == SEQ_START_TOKEN)
4202                seq_puts(seq, "Inter-|   Receive                            "
4203                              "                    |  Transmit\n"
4204                              " face |bytes    packets errs drop fifo frame "
4205                              "compressed multicast|bytes    packets errs "
4206                              "drop fifo colls carrier compressed\n");
4207        else
4208                dev_seq_printf_stats(seq, v);
4209        return 0;
4210}
4211
4212static struct softnet_data *softnet_get_online(loff_t *pos)
4213{
4214        struct softnet_data *sd = NULL;
4215
4216        while (*pos < nr_cpu_ids)
4217                if (cpu_online(*pos)) {
4218                        sd = &per_cpu(softnet_data, *pos);
4219                        break;
4220                } else
4221                        ++*pos;
4222        return sd;
4223}
4224
4225static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4226{
4227        return softnet_get_online(pos);
4228}
4229
4230static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4231{
4232        ++*pos;
4233        return softnet_get_online(pos);
4234}
4235
4236static void softnet_seq_stop(struct seq_file *seq, void *v)
4237{
4238}
4239
4240static int softnet_seq_show(struct seq_file *seq, void *v)
4241{
4242        struct softnet_data *sd = v;
4243
4244        seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4245                   sd->processed, sd->dropped, sd->time_squeeze, 0,
4246                   0, 0, 0, 0, /* was fastroute */
4247                   sd->cpu_collision, sd->received_rps);
4248        return 0;
4249}
4250
4251static const struct seq_operations dev_seq_ops = {
4252        .start = dev_seq_start,
4253        .next  = dev_seq_next,
4254        .stop  = dev_seq_stop,
4255        .show  = dev_seq_show,
4256};
4257
4258static int dev_seq_open(struct inode *inode, struct file *file)
4259{
4260        return seq_open_net(inode, file, &dev_seq_ops,
4261                            sizeof(struct seq_net_private));
4262}
4263
4264static const struct file_operations dev_seq_fops = {
4265        .owner   = THIS_MODULE,
4266        .open    = dev_seq_open,
4267        .read    = seq_read,
4268        .llseek  = seq_lseek,
4269        .release = seq_release_net,
4270};
4271
4272static const struct seq_operations softnet_seq_ops = {
4273        .start = softnet_seq_start,
4274        .next  = softnet_seq_next,
4275        .stop  = softnet_seq_stop,
4276        .show  = softnet_seq_show,
4277};
4278
4279static int softnet_seq_open(struct inode *inode, struct file *file)
4280{
4281        return seq_open(file, &softnet_seq_ops);
4282}
4283
4284static const struct file_operations softnet_seq_fops = {
4285        .owner   = THIS_MODULE,
4286        .open    = softnet_seq_open,
4287        .read    = seq_read,
4288        .llseek  = seq_lseek,
4289        .release = seq_release,
4290};
4291
4292static void *ptype_get_idx(loff_t pos)
4293{
4294        struct packet_type *pt = NULL;
4295        loff_t i = 0;
4296        int t;
4297
4298        list_for_each_entry_rcu(pt, &ptype_all, list) {
4299                if (i == pos)
4300                        return pt;
4301                ++i;
4302        }
4303
4304        for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4305                list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4306                        if (i == pos)
4307                                return pt;
4308                        ++i;
4309                }
4310        }
4311        return NULL;
4312}
4313
4314static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4315        __acquires(RCU)
4316{
4317        rcu_read_lock();
4318        return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4319}
4320
4321static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4322{
4323        struct packet_type *pt;
4324        struct list_head *nxt;
4325        int hash;
4326
4327        ++*pos;
4328        if (v == SEQ_START_TOKEN)
4329                return ptype_get_idx(0);
4330
4331        pt = v;
4332        nxt = pt->list.next;
4333        if (pt->type == htons(ETH_P_ALL)) {
4334                if (nxt != &ptype_all)
4335                        goto found;
4336                hash = 0;
4337                nxt = ptype_base[0].next;
4338        } else
4339                hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4340
4341        while (nxt == &ptype_base[hash]) {
4342                if (++hash >= PTYPE_HASH_SIZE)
4343                        return NULL;
4344                nxt = ptype_base[hash].next;
4345        }
4346found:
4347        return list_entry(nxt, struct packet_type, list);
4348}
4349
4350static void ptype_seq_stop(struct seq_file *seq, void *v)
4351        __releases(RCU)
4352{
4353        rcu_read_unlock();
4354}
4355
4356static int ptype_seq_show(struct seq_file *seq, void *v)
4357{
4358        struct packet_type *pt = v;
4359
4360        if (v == SEQ_START_TOKEN)
4361                seq_puts(seq, "Type Device      Function\n");
4362        else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4363                if (pt->type == htons(ETH_P_ALL))
4364                        seq_puts(seq, "ALL ");
4365                else
4366                        seq_printf(seq, "%04x", ntohs(pt->type));
4367
4368                seq_printf(seq, " %-8s %pF\n",
4369                           pt->dev ? pt->dev->name : "", pt->func);
4370        }
4371
4372        return 0;
4373}
4374
4375static const struct seq_operations ptype_seq_ops = {
4376        .start = ptype_seq_start,
4377        .next  = ptype_seq_next,
4378        .stop  = ptype_seq_stop,
4379        .show  = ptype_seq_show,
4380};
4381
4382static int ptype_seq_open(struct inode *inode, struct file *file)
4383{
4384        return seq_open_net(inode, file, &ptype_seq_ops,
4385                        sizeof(struct seq_net_private));
4386}
4387
4388static const struct file_operations ptype_seq_fops = {
4389        .owner   = THIS_MODULE,
4390        .open    = ptype_seq_open,
4391        .read    = seq_read,
4392        .llseek  = seq_lseek,
4393        .release = seq_release_net,
4394};
4395
4396
4397static int __net_init dev_proc_net_init(struct net *net)
4398{
4399        int rc = -ENOMEM;
4400
4401        if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4402                goto out;
4403        if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4404                goto out_dev;
4405        if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4406                goto out_softnet;
4407
4408        if (wext_proc_init(net))
4409                goto out_ptype;
4410        rc = 0;
4411out:
4412        return rc;
4413out_ptype:
4414        proc_net_remove(net, "ptype");
4415out_softnet:
4416        proc_net_remove(net, "softnet_stat");
4417out_dev:
4418        proc_net_remove(net, "dev");
4419        goto out;
4420}
4421
4422static void __net_exit dev_proc_net_exit(struct net *net)
4423{
4424        wext_proc_exit(net);
4425
4426        proc_net_remove(net, "ptype");
4427        proc_net_remove(net, "softnet_stat");
4428        proc_net_remove(net, "dev");
4429}
4430
4431static struct pernet_operations __net_initdata dev_proc_ops = {
4432        .init = dev_proc_net_init,
4433        .exit = dev_proc_net_exit,
4434};
4435
4436static int __init dev_proc_init(void)
4437{
4438        return register_pernet_subsys(&dev_proc_ops);
4439}
4440#else
4441#define dev_proc_init() 0
4442#endif  /* CONFIG_PROC_FS */
4443
4444
4445/**
4446 *      netdev_set_master       -       set up master pointer
4447 *      @slave: slave device
4448 *      @master: new master device
4449 *
4450 *      Changes the master device of the slave. Pass %NULL to break the
4451 *      bonding. The caller must hold the RTNL semaphore. On a failure
4452 *      a negative errno code is returned. On success the reference counts
4453 *      are adjusted and the function returns zero.
4454 */
4455int netdev_set_master(struct net_device *slave, struct net_device *master)
4456{
4457        struct net_device *old = slave->master;
4458
4459        ASSERT_RTNL();
4460
4461        if (master) {
4462                if (old)
4463                        return -EBUSY;
4464                dev_hold(master);
4465        }
4466
4467        slave->master = master;
4468
4469        if (old)
4470                dev_put(old);
4471        return 0;
4472}
4473EXPORT_SYMBOL(netdev_set_master);
4474
4475/**
4476 *      netdev_set_bond_master  -       set up bonding master/slave pair
4477 *      @slave: slave device
4478 *      @master: new master device
4479 *
4480 *      Changes the master device of the slave. Pass %NULL to break the
4481 *      bonding. The caller must hold the RTNL semaphore. On a failure
4482 *      a negative errno code is returned. On success %RTM_NEWLINK is sent
4483 *      to the routing socket and the function returns zero.
4484 */
4485int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4486{
4487        int err;
4488
4489        ASSERT_RTNL();
4490
4491        err = netdev_set_master(slave, master);
4492        if (err)
4493                return err;
4494        if (master)
4495                slave->flags |= IFF_SLAVE;
4496        else
4497                slave->flags &= ~IFF_SLAVE;
4498
4499        rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4500        return 0;
4501}
4502EXPORT_SYMBOL(netdev_set_bond_master);
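/*
 * Illustrative sketch (not part of this file): how a bonding-style
 * driver might enslave and release a device using the helper above.
 * bond_dev and slave_dev are caller-provided devices.
 */
#if 0
static int my_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
	int err;

	rtnl_lock();
	err = netdev_set_bond_master(slave_dev, bond_dev);
	rtnl_unlock();
	return err;
}

static void my_release(struct net_device *slave_dev)
{
	rtnl_lock();
	netdev_set_bond_master(slave_dev, NULL);	/* break the bonding */
	rtnl_unlock();
}
#endif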
4503
4504static void dev_change_rx_flags(struct net_device *dev, int flags)
4505{
4506        const struct net_device_ops *ops = dev->netdev_ops;
4507
4508        if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4509                ops->ndo_change_rx_flags(dev, flags);
4510}
4511
4512static int __dev_set_promiscuity(struct net_device *dev, int inc)
4513{
4514        unsigned int old_flags = dev->flags;
4515        uid_t uid;
4516        gid_t gid;
4517
4518        ASSERT_RTNL();
4519
4520        dev->flags |= IFF_PROMISC;
4521        dev->promiscuity += inc;
4522        if (dev->promiscuity == 0) {
4523                /*
4524                 * Avoid overflow.
4525                 * If inc would cause an overflow, leave promisc untouched and return an error.
4526                 */
4527                if (inc < 0)
4528                        dev->flags &= ~IFF_PROMISC;
4529                else {
4530                        dev->promiscuity -= inc;
4531                        pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4532                                dev->name);
4533                        return -EOVERFLOW;
4534                }
4535        }
4536        if (dev->flags != old_flags) {
4537                pr_info("device %s %s promiscuous mode\n",
4538                        dev->name,
4539                        dev->flags & IFF_PROMISC ? "entered" : "left");
4540                if (audit_enabled) {
4541                        current_uid_gid(&uid, &gid);
4542                        audit_log(current->audit_context, GFP_ATOMIC,
4543                                AUDIT_ANOM_PROMISCUOUS,
4544                                "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4545                                dev->name, (dev->flags & IFF_PROMISC),
4546                                (old_flags & IFF_PROMISC),
4547                                audit_get_loginuid(current),
4548                                uid, gid,
4549                                audit_get_sessionid(current));
4550                }
4551
4552                dev_change_rx_flags(dev, IFF_PROMISC);
4553        }
4554        return 0;
4555}
4556
4557/**
4558 *      dev_set_promiscuity     - update promiscuity count on a device
4559 *      @dev: device
4560 *      @inc: modifier
4561 *
4562 *      Add or remove promiscuity from a device. While the count in the device
4563 *      remains above zero the interface remains promiscuous. Once it hits zero
4564 *      the device reverts back to normal filtering operation. A negative inc
4565 *      value is used to drop promiscuity on the device.
4566 *      Return 0 if successful or a negative errno code on error.
4567 */
4568int dev_set_promiscuity(struct net_device *dev, int inc)
4569{
4570        unsigned int old_flags = dev->flags;
4571        int err;
4572
4573        err = __dev_set_promiscuity(dev, inc);
4574        if (err < 0)
4575                return err;
4576        if (dev->flags != old_flags)
4577                dev_set_rx_mode(dev);
4578        return err;
4579}
4580EXPORT_SYMBOL(dev_set_promiscuity);
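/*
 * Illustrative sketch (not part of this file): a packet-capture style
 * user of the promiscuity count.  The calls must be made under RTNL,
 * and the count is reference-counted rather than a simple on/off flag.
 */
#if 0
static void my_capture_start(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);
	rtnl_unlock();
}

static void my_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}
#endif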
4581
4582/**
4583 *      dev_set_allmulti        - update allmulti count on a device
4584 *      @dev: device
4585 *      @inc: modifier
4586 *
4587 *      Add or remove reception of all multicast frames to a device. While the
4588 *      count in the device remains above zero the interface remains listening
4589 *      to all interfaces. Once it hits zero the device reverts back to normal
4590 *      to all multicast frames. Once it hits zero the device reverts back to normal
4591 *      when releasing a resource needing all multicasts.
4592 *      Return 0 if successful or a negative errno code on error.
4593 */
4594
4595int dev_set_allmulti(struct net_device *dev, int inc)
4596{
4597        unsigned int old_flags = dev->flags;
4598
4599        ASSERT_RTNL();
4600
4601        dev->flags |= IFF_ALLMULTI;
4602        dev->allmulti += inc;
4603        if (dev->allmulti == 0) {
4604                /*
4605                 * Avoid overflow.
4606                 * If inc causes overflow, untouch allmulti and return error.
4607                 * If inc would cause an overflow, leave allmulti untouched and return an error.
4608                if (inc < 0)
4609                        dev->flags &= ~IFF_ALLMULTI;
4610                else {
4611                        dev->allmulti -= inc;
4612                        pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4613                                dev->name);
4614                        return -EOVERFLOW;
4615                }
4616        }
4617        if (dev->flags ^ old_flags) {
4618                dev_change_rx_flags(dev, IFF_ALLMULTI);
4619                dev_set_rx_mode(dev);
4620        }
4621        return 0;
4622}
4623EXPORT_SYMBOL(dev_set_allmulti);
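/*
 * Illustrative sketch (not part of this file): a component that needs to
 * see every multicast frame (e.g. a multicast routing helper) bumps the
 * allmulti count while active and drops it again when done.
 */
#if 0
static void my_mcast_listen(struct net_device *dev, bool on)
{
	rtnl_lock();
	dev_set_allmulti(dev, on ? 1 : -1);
	rtnl_unlock();
}
#endif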
4624
4625/*
4626 *      Upload unicast and multicast address lists to device and
4627 *      configure RX filtering. When the device doesn't support unicast
4628 *      filtering it is put in promiscuous mode while unicast addresses
4629 *      are present.
4630 */
4631void __dev_set_rx_mode(struct net_device *dev)
4632{
4633        const struct net_device_ops *ops = dev->netdev_ops;
4634
4635        /* dev_open will call this function so the list will stay sane. */
4636        if (!(dev->flags&IFF_UP))
4637                return;
4638
4639        if (!netif_device_present(dev))
4640                return;
4641
4642        if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4643                /* Unicast address changes may only happen under the rtnl,
4644                 * therefore calling __dev_set_promiscuity here is safe.
4645                 */
4646                if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4647                        __dev_set_promiscuity(dev, 1);
4648                        dev->uc_promisc = true;
4649                } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4650                        __dev_set_promiscuity(dev, -1);
4651                        dev->uc_promisc = false;
4652                }
4653        }
4654
4655        if (ops->ndo_set_rx_mode)
4656                ops->ndo_set_rx_mode(dev);
4657}
4658
4659void dev_set_rx_mode(struct net_device *dev)
4660{
4661        netif_addr_lock_bh(dev);
4662        __dev_set_rx_mode(dev);
4663        netif_addr_unlock_bh(dev);
4664}
4665
4666/**
4667 *      dev_get_flags - get flags reported to userspace
4668 *      @dev: device
4669 *
4670 *      Get the combination of flag bits exported through APIs to userspace.
4671 */
4672unsigned int dev_get_flags(const struct net_device *dev)
4673{
4674        unsigned int flags;
4675
4676        flags = (dev->flags & ~(IFF_PROMISC |
4677                                IFF_ALLMULTI |
4678                                IFF_RUNNING |
4679                                IFF_LOWER_UP |
4680                                IFF_DORMANT)) |
4681                (dev->gflags & (IFF_PROMISC |
4682                                IFF_ALLMULTI));
4683
4684        if (netif_running(dev)) {
4685                if (netif_oper_up(dev))
4686                        flags |= IFF_RUNNING;
4687                if (netif_carrier_ok(dev))
4688                        flags |= IFF_LOWER_UP;
4689                if (netif_dormant(dev))
4690                        flags |= IFF_DORMANT;
4691        }
4692
4693        return flags;
4694}
4695EXPORT_SYMBOL(dev_get_flags);
4696
4697int __dev_change_flags(struct net_device *dev, unsigned int flags)
4698{
4699        unsigned int old_flags = dev->flags;
4700        int ret;
4701
4702        ASSERT_RTNL();
4703
4704        /*
4705         *      Set the flags on our device.
4706         */
4707
4708        dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4709                               IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4710                               IFF_AUTOMEDIA)) |
4711                     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4712                                    IFF_ALLMULTI));
4713
4714        /*
4715         *      Load in the correct multicast list now the flags have changed.
4716         */
4717
4718        if ((old_flags ^ flags) & IFF_MULTICAST)
4719                dev_change_rx_flags(dev, IFF_MULTICAST);
4720
4721        dev_set_rx_mode(dev);
4722
4723        /*
4724         *      Have we downed the interface? We handle IFF_UP ourselves
4725         *      according to user attempts to set it, rather than blindly
4726         *      setting it.
4727         */
4728
4729        ret = 0;
4730        if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
4731                ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4732
4733                if (!ret)
4734                        dev_set_rx_mode(dev);
4735        }
4736
4737        if ((flags ^ dev->gflags) & IFF_PROMISC) {
4738                int inc = (flags & IFF_PROMISC) ? 1 : -1;
4739
4740                dev->gflags ^= IFF_PROMISC;
4741                dev_set_promiscuity(dev, inc);
4742        }
4743
4744        /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4745           is important. Some (broken) drivers set IFF_PROMISC when
4746           IFF_ALLMULTI is requested, without asking us and without reporting it.
4747         */
4748        if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4749                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4750
4751                dev->gflags ^= IFF_ALLMULTI;
4752                dev_set_allmulti(dev, inc);
4753        }
4754
4755        return ret;
4756}
4757
4758void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4759{
4760        unsigned int changes = dev->flags ^ old_flags;
4761
4762        if (changes & IFF_UP) {
4763                if (dev->flags & IFF_UP)
4764                        call_netdevice_notifiers(NETDEV_UP, dev);
4765                else
4766                        call_netdevice_notifiers(NETDEV_DOWN, dev);
4767        }
4768
4769        if (dev->flags & IFF_UP &&
4770            (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4771                call_netdevice_notifiers(NETDEV_CHANGE, dev);
4772}
4773
4774/**
4775 *      dev_change_flags - change device settings
4776 *      @dev: device
4777 *      @flags: device state flags
4778 *
4779 *      Change settings on a device based on the state flags. The flags
4780 *      are in the userspace-exported format.
4781 */
4782int dev_change_flags(struct net_device *dev, unsigned int flags)
4783{
4784        int ret;
4785        unsigned int changes, old_flags = dev->flags;
4786
4787        ret = __dev_change_flags(dev, flags);
4788        if (ret < 0)
4789                return ret;
4790
4791        changes = old_flags ^ dev->flags;
4792        if (changes)
4793                rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4794
4795        __dev_notify_flags(dev, old_flags);
4796        return ret;
4797}
4798EXPORT_SYMBOL(dev_change_flags);
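
/*
 * Illustrative sketch (editorial, not part of dev.c): a kernel-side caller
 * bringing an interface administratively up via dev_change_flags(). The
 * function name is hypothetical; RTNL must be held, as __dev_change_flags()
 * asserts above.
 */
static int __maybe_unused example_bring_up(struct net_device *dev)
{
        ASSERT_RTNL();

        /* Preserve the other userspace-visible flags, just add IFF_UP. */
        return dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
}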
4799
4800/**
4801 *      dev_set_mtu - Change maximum transmission unit
4802 *      @dev: device
4803 *      @new_mtu: new transmission unit
4804 *
4805 *      Change the maximum transmission unit (MTU) of the network device.
4806 */
4807int dev_set_mtu(struct net_device *dev, int new_mtu)
4808{
4809        const struct net_device_ops *ops = dev->netdev_ops;
4810        int err;
4811
4812        if (new_mtu == dev->mtu)
4813                return 0;
4814
4815        /*      MTU must not be negative.        */
4816        if (new_mtu < 0)
4817                return -EINVAL;
4818
4819        if (!netif_device_present(dev))
4820                return -ENODEV;
4821
4822        err = 0;
4823        if (ops->ndo_change_mtu)
4824                err = ops->ndo_change_mtu(dev, new_mtu);
4825        else
4826                dev->mtu = new_mtu;
4827
4828        if (!err && dev->flags & IFF_UP)
4829                call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4830        return err;
4831}
4832EXPORT_SYMBOL(dev_set_mtu);
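
/*
 * Illustrative sketch (editorial, not part of dev.c): changing the MTU from
 * kernel code. The caller and the 9000-byte value are hypothetical; RTNL is
 * assumed to be held, as for the other dev_* setters in this file.
 */
static int __maybe_unused example_enable_jumbo_frames(struct net_device *dev)
{
        ASSERT_RTNL();

        /* The driver's ndo_change_mtu() may still reject the value. */
        return dev_set_mtu(dev, 9000);
}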
4833
4834/**
4835 *      dev_set_group - Change group this device belongs to
4836 *      @dev: device
4837 *      @new_group: group this device should belong to
4838 */
4839void dev_set_group(struct net_device *dev, int new_group)
4840{
4841        dev->group = new_group;
4842}
4843EXPORT_SYMBOL(dev_set_group);
4844
4845/**
4846 *      dev_set_mac_address - Change Media Access Control Address
4847 *      @dev: device
4848 *      @sa: new address
4849 *
4850 *      Change the hardware (MAC) address of the device
4851 */
4852int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4853{
4854        const struct net_device_ops *ops = dev->netdev_ops;
4855        int err;
4856
4857        if (!ops->ndo_set_mac_address)
4858                return -EOPNOTSUPP;
4859        if (sa->sa_family != dev->type)
4860                return -EINVAL;
4861        if (!netif_device_present(dev))
4862                return -ENODEV;
4863        err = ops->ndo_set_mac_address(dev, sa);
4864        if (!err)
4865                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4866        add_device_randomness(dev->dev_addr, dev->addr_len);
4867        return err;
4868}
4869EXPORT_SYMBOL(dev_set_mac_address);
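
/*
 * Illustrative sketch (editorial, not part of dev.c): building the sockaddr
 * that dev_set_mac_address() expects. The helper name and the caller-supplied
 * address are hypothetical; sa_family must match dev->type or -EINVAL is
 * returned above.
 */
static int __maybe_unused example_set_hw_address(struct net_device *dev,
                                                 const unsigned char *addr)
{
        struct sockaddr sa;

        ASSERT_RTNL();

        sa.sa_family = dev->type;
        memcpy(sa.sa_data, addr,
               min_t(size_t, dev->addr_len, sizeof(sa.sa_data)));
        return dev_set_mac_address(dev, &sa);
}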
4870
4871/*
4872 *      Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4873 */
4874static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4875{
4876        int err;
4877        struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4878
4879        if (!dev)
4880                return -ENODEV;
4881
4882        switch (cmd) {
4883        case SIOCGIFFLAGS:      /* Get interface flags */
4884                ifr->ifr_flags = (short) dev_get_flags(dev);
4885                return 0;
4886
4887        case SIOCGIFMETRIC:     /* Get the metric on the interface
4888                                   (currently unused) */
4889                ifr->ifr_metric = 0;
4890                return 0;
4891
4892        case SIOCGIFMTU:        /* Get the MTU of a device */
4893                ifr->ifr_mtu = dev->mtu;
4894                return 0;
4895
4896        case SIOCGIFHWADDR:
4897                if (!dev->addr_len)
4898                        memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4899                else
4900                        memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4901                               min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4902                ifr->ifr_hwaddr.sa_family = dev->type;
4903                return 0;
4904
4905        case SIOCGIFSLAVE:
4906                err = -EINVAL;
4907                break;
4908
4909        case SIOCGIFMAP:
4910                ifr->ifr_map.mem_start = dev->mem_start;
4911                ifr->ifr_map.mem_end   = dev->mem_end;
4912                ifr->ifr_map.base_addr = dev->base_addr;
4913                ifr->ifr_map.irq       = dev->irq;
4914                ifr->ifr_map.dma       = dev->dma;
4915                ifr->ifr_map.port      = dev->if_port;
4916                return 0;
4917
4918        case SIOCGIFINDEX:
4919                ifr->ifr_ifindex = dev->ifindex;
4920                return 0;
4921
4922        case SIOCGIFTXQLEN:
4923                ifr->ifr_qlen = dev->tx_queue_len;
4924                return 0;
4925
4926        default:
4927                /* dev_ioctl() should ensure this case
4928                 * is never reached
4929                 */
4930                WARN_ON(1);
4931                err = -ENOTTY;
4932                break;
4933
4934        }
4935        return err;
4936}
4937
4938/*
4939 *      Perform the SIOCxIFxxx calls, inside rtnl_lock()
4940 */
4941static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4942{
4943        int err;
4944        struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4945        const struct net_device_ops *ops;
4946
4947        if (!dev)
4948                return -ENODEV;
4949
4950        ops = dev->netdev_ops;
4951
4952        switch (cmd) {
4953        case SIOCSIFFLAGS:      /* Set interface flags */
4954                return dev_change_flags(dev, ifr->ifr_flags);
4955
4956        case SIOCSIFMETRIC:     /* Set the metric on the interface
4957                                   (currently unused) */
4958                return -EOPNOTSUPP;
4959
4960        case SIOCSIFMTU:        /* Set the MTU of a device */
4961                return dev_set_mtu(dev, ifr->ifr_mtu);
4962
4963        case SIOCSIFHWADDR:
4964                return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4965
4966        case SIOCSIFHWBROADCAST:
4967                if (ifr->ifr_hwaddr.sa_family != dev->type)
4968                        return -EINVAL;
4969                memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4970                       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4971                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4972                return 0;
4973
4974        case SIOCSIFMAP:
4975                if (ops->ndo_set_config) {
4976                        if (!netif_device_present(dev))
4977                                return -ENODEV;
4978                        return ops->ndo_set_config(dev, &ifr->ifr_map);
4979                }
4980                return -EOPNOTSUPP;
4981
4982        case SIOCADDMULTI:
4983                if (!ops->ndo_set_rx_mode ||
4984                    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4985                        return -EINVAL;
4986                if (!netif_device_present(dev))
4987                        return -ENODEV;
4988                return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4989
4990        case SIOCDELMULTI:
4991                if (!ops->ndo_set_rx_mode ||
4992                    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4993                        return -EINVAL;
4994                if (!netif_device_present(dev))
4995                        return -ENODEV;
4996                return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4997
4998        case SIOCSIFTXQLEN:
4999                if (ifr->ifr_qlen < 0)
5000                        return -EINVAL;
5001                dev->tx_queue_len = ifr->ifr_qlen;
5002                return 0;
5003
5004        case SIOCSIFNAME:
5005                ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5006                return dev_change_name(dev, ifr->ifr_newname);
5007
5008        case SIOCSHWTSTAMP:
5009                err = net_hwtstamp_validate(ifr);
5010                if (err)
5011                        return err;
5012                /* fall through */
5013
5014        /*
5015         *      Unknown or private ioctl
5016         */
5017        default:
5018                if ((cmd >= SIOCDEVPRIVATE &&
5019                    cmd <= SIOCDEVPRIVATE + 15) ||
5020                    cmd == SIOCBONDENSLAVE ||
5021                    cmd == SIOCBONDRELEASE ||
5022                    cmd == SIOCBONDSETHWADDR ||
5023                    cmd == SIOCBONDSLAVEINFOQUERY ||
5024                    cmd == SIOCBONDINFOQUERY ||
5025                    cmd == SIOCBONDCHANGEACTIVE ||
5026                    cmd == SIOCGMIIPHY ||
5027                    cmd == SIOCGMIIREG ||
5028                    cmd == SIOCSMIIREG ||
5029                    cmd == SIOCBRADDIF ||
5030                    cmd == SIOCBRDELIF ||
5031                    cmd == SIOCSHWTSTAMP ||
5032                    cmd == SIOCWANDEV) {
5033                        err = -EOPNOTSUPP;
5034                        if (ops->ndo_do_ioctl) {
5035                                if (netif_device_present(dev))
5036                                        err = ops->ndo_do_ioctl(dev, ifr, cmd);
5037                                else
5038                                        err = -ENODEV;
5039                        }
5040                } else
5041                        err = -EINVAL;
5042
5043        }
5044        return err;
5045}
5046
5047/*
5048 *      This function handles all "interface"-type I/O control requests. The actual
5049 *      'doing' part of this is dev_ifsioc_locked() and dev_ifsioc() above.
5050 */
5051
5052/**
5053 *      dev_ioctl       -       network device ioctl
5054 *      @net: the applicable net namespace
5055 *      @cmd: command to issue
5056 *      @arg: pointer to a struct ifreq in user space
5057 *
5058 *      Issue ioctl commands to devices. This is normally called by the
5059 *      user space syscall interfaces but can sometimes be useful for
5060 *      other purposes. The return value is the return from the syscall if
5061 *      positive, or a negative errno code on error.
5062 */
5063
5064int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5065{
5066        struct ifreq ifr;
5067        int ret;
5068        char *colon;
5069
5070        /* One special case: SIOCGIFCONF takes an ifconf argument
5071           and requires a shared lock, because it sleeps writing
5072           to user space.
5073         */
5074
5075        if (cmd == SIOCGIFCONF) {
5076                rtnl_lock();
5077                ret = dev_ifconf(net, (char __user *) arg);
5078                rtnl_unlock();
5079                return ret;
5080        }
5081        if (cmd == SIOCGIFNAME)
5082                return dev_ifname(net, (struct ifreq __user *)arg);
5083
5084        if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5085                return -EFAULT;
5086
5087        ifr.ifr_name[IFNAMSIZ-1] = 0;
5088
5089        colon = strchr(ifr.ifr_name, ':');
5090        if (colon)
5091                *colon = 0;
5092
5093        /*
5094         *      See which interface the caller is talking about.
5095         */
5096
5097        switch (cmd) {
5098        /*
5099         *      These ioctl calls:
5100         *      - can be done by all.
5101         *      - are atomic and do not require locking.
5102         *      - return a value
5103         */
5104        case SIOCGIFFLAGS:
5105        case SIOCGIFMETRIC:
5106        case SIOCGIFMTU:
5107        case SIOCGIFHWADDR:
5108        case SIOCGIFSLAVE:
5109        case SIOCGIFMAP:
5110        case SIOCGIFINDEX:
5111        case SIOCGIFTXQLEN:
5112                dev_load(net, ifr.ifr_name);
5113                rcu_read_lock();
5114                ret = dev_ifsioc_locked(net, &ifr, cmd);
5115                rcu_read_unlock();
5116                if (!ret) {
5117                        if (colon)
5118                                *colon = ':';
5119                        if (copy_to_user(arg, &ifr,
5120                                         sizeof(struct ifreq)))
5121                                ret = -EFAULT;
5122                }
5123                return ret;
5124
5125        case SIOCETHTOOL:
5126                dev_load(net, ifr.ifr_name);
5127                rtnl_lock();
5128                ret = dev_ethtool(net, &ifr);
5129                rtnl_unlock();
5130                if (!ret) {
5131                        if (colon)
5132                                *colon = ':';
5133                        if (copy_to_user(arg, &ifr,
5134                                         sizeof(struct ifreq)))
5135                                ret = -EFAULT;
5136                }
5137                return ret;
5138
5139        /*
5140         *      These ioctl calls:
5141         *      - require superuser power.
5142         *      - require strict serialization.
5143         *      - return a value
5144         */
5145        case SIOCGMIIPHY:
5146        case SIOCGMIIREG:
5147        case SIOCSIFNAME:
5148                if (!capable(CAP_NET_ADMIN))
5149                        return -EPERM;
5150                dev_load(net, ifr.ifr_name);
5151                rtnl_lock();
5152                ret = dev_ifsioc(net, &ifr, cmd);
5153                rtnl_unlock();
5154                if (!ret) {
5155                        if (colon)
5156                                *colon = ':';
5157                        if (copy_to_user(arg, &ifr,
5158                                         sizeof(struct ifreq)))
5159                                ret = -EFAULT;
5160                }
5161                return ret;
5162
5163        /*
5164         *      These ioctl calls:
5165         *      - require superuser power.
5166         *      - require strict serialization.
5167         *      - do not return a value
5168         */
5169        case SIOCSIFFLAGS:
5170        case SIOCSIFMETRIC:
5171        case SIOCSIFMTU:
5172        case SIOCSIFMAP:
5173        case SIOCSIFHWADDR:
5174        case SIOCSIFSLAVE:
5175        case SIOCADDMULTI:
5176        case SIOCDELMULTI:
5177        case SIOCSIFHWBROADCAST:
5178        case SIOCSIFTXQLEN:
5179        case SIOCSMIIREG:
5180        case SIOCBONDENSLAVE:
5181        case SIOCBONDRELEASE:
5182        case SIOCBONDSETHWADDR:
5183        case SIOCBONDCHANGEACTIVE:
5184        case SIOCBRADDIF:
5185        case SIOCBRDELIF:
5186        case SIOCSHWTSTAMP:
5187                if (!capable(CAP_NET_ADMIN))
5188                        return -EPERM;
5189                /* fall through */
5190        case SIOCBONDSLAVEINFOQUERY:
5191        case SIOCBONDINFOQUERY:
5192                dev_load(net, ifr.ifr_name);
5193                rtnl_lock();
5194                ret = dev_ifsioc(net, &ifr, cmd);
5195                rtnl_unlock();
5196                return ret;
5197
5198        case SIOCGIFMEM:
5199                /* Get the per-device memory space. We can add this but
5200                 * currently do not support it. */
5201        case SIOCSIFMEM:
5202                /* Set the per-device memory buffer space.
5203                 * Not applicable in our case. */
5204        case SIOCSIFLINK:
5205                return -ENOTTY;
5206
5207        /*
5208         *      Unknown or private ioctl.
5209         */
5210        default:
5211                if (cmd == SIOCWANDEV ||
5212                    (cmd >= SIOCDEVPRIVATE &&
5213                     cmd <= SIOCDEVPRIVATE + 15)) {
5214                        dev_load(net, ifr.ifr_name);
5215                        rtnl_lock();
5216                        ret = dev_ifsioc(net, &ifr, cmd);
5217                        rtnl_unlock();
5218                        if (!ret && copy_to_user(arg, &ifr,
5219                                                 sizeof(struct ifreq)))
5220                                ret = -EFAULT;
5221                        return ret;
5222                }
5223                /* Take care of Wireless Extensions */
5224                if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5225                        return wext_handle_ioctl(net, &ifr, cmd, arg);
5226                return -ENOTTY;
5227        }
5228}
5229
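#if 0   /* Userspace-side illustrative sketch; not kernel code. It shows a
         * SIOCGIFMTU request being routed through dev_ioctl() and
         * dev_ifsioc_locked() above. The interface name "eth0" is an
         * assumption.
         */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
                printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);

        close(fd);
        return 0;
}
#endif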
5230
5231/**
5232 *      dev_new_index   -       allocate an ifindex
5233 *      @net: the applicable net namespace
5234 *
5235 *      Returns a suitable unique value for a new device interface
5236 *      number.  The caller must hold the rtnl semaphore or the
5237 *      dev_base_lock to be sure it remains unique.
5238 */
5239static int dev_new_index(struct net *net)
5240{
5241        static int ifindex;
5242        for (;;) {
5243                if (++ifindex <= 0)
5244                        ifindex = 1;
5245                if (!__dev_get_by_index(net, ifindex))
5246                        return ifindex;
5247        }
5248}
5249
5250/* Delayed registration/unregistration */
5251static LIST_HEAD(net_todo_list);
5252
5253static void net_set_todo(struct net_device *dev)
5254{
5255        list_add_tail(&dev->todo_list, &net_todo_list);
5256}
5257
5258static void rollback_registered_many(struct list_head *head)
5259{
5260        struct net_device *dev, *tmp;
5261
5262        BUG_ON(dev_boot_phase);
5263        ASSERT_RTNL();
5264
5265        list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5266                /* Some devices call this without ever having been
5267                 * registered, to unwind a failed initialization.
5268                 * Remove those devices and proceed with the remaining ones.
5269                 */
5270                if (dev->reg_state == NETREG_UNINITIALIZED) {
5271                        pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5272                                 dev->name, dev);
5273
5274                        WARN_ON(1);
5275                        list_del(&dev->unreg_list);
5276                        continue;
5277                }
5278                dev->dismantle = true;
5279                BUG_ON(dev->reg_state != NETREG_REGISTERED);
5280        }
5281
5282        /* If device is running, close it first. */
5283        dev_close_many(head);
5284
5285        list_for_each_entry(dev, head, unreg_list) {
5286                /* And unlink it from device chain. */
5287                unlist_netdevice(dev);
5288
5289                dev->reg_state = NETREG_UNREGISTERING;
5290        }
5291
5292        synchronize_net();
5293
5294        list_for_each_entry(dev, head, unreg_list) {
5295                /* Shutdown queueing discipline. */
5296                dev_shutdown(dev);
5297
5298
5299                /* Notify protocols that we are about to destroy
5300                   this device. They should clean up all of their state.
5301                */
5302                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5303
5304                if (!dev->rtnl_link_ops ||
5305                    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5306                        rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5307
5308                /*
5309                 *      Flush the unicast and multicast chains
5310                 */
5311                dev_uc_flush(dev);
5312                dev_mc_flush(dev);
5313
5314                if (dev->netdev_ops->ndo_uninit)
5315                        dev->netdev_ops->ndo_uninit(dev);
5316
5317                /* Notifier chain MUST detach us from master device. */
5318                WARN_ON(dev->master);
5319
5320                /* Remove entries from kobject tree */
5321                netdev_unregister_kobject(dev);
5322        }
5323
5324        /* Process any work delayed until the end of the batch */
5325        dev = list_first_entry(head, struct net_device, unreg_list);
5326        call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5327
5328        synchronize_net();
5329
5330        list_for_each_entry(dev, head, unreg_list)
5331                dev_put(dev);
5332}
5333
5334static void rollback_registered(struct net_device *dev)
5335{
5336        LIST_HEAD(single);
5337
5338        list_add(&dev->unreg_list, &single);
5339        rollback_registered_many(&single);
5340        list_del(&single);
5341}
5342
5343static netdev_features_t netdev_fix_features(struct net_device *dev,
5344        netdev_features_t features)
5345{
5346        /* Fix illegal checksum combinations */
5347        if ((features & NETIF_F_HW_CSUM) &&
5348            (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5349                netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5350                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5351        }
5352
5353        /* Fix illegal SG+CSUM combinations. */
5354        if ((features & NETIF_F_SG) &&
5355            !(features & NETIF_F_ALL_CSUM)) {
5356                netdev_dbg(dev,
5357                        "Dropping NETIF_F_SG since no checksum feature.\n");
5358                features &= ~NETIF_F_SG;
5359        }
5360
5361        /* TSO requires that SG is present as well. */
5362        if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5363                netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5364                features &= ~NETIF_F_ALL_TSO;
5365        }
5366
5367        /* TSO ECN requires that TSO is present as well. */
5368        if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5369                features &= ~NETIF_F_TSO_ECN;
5370
5371        /* Software GSO depends on SG. */
5372        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5373                netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5374                features &= ~NETIF_F_GSO;
5375        }
5376
5377        /* UFO needs SG and checksumming */
5378        if (features & NETIF_F_UFO) {
5379                /* maybe split UFO into V4 and V6? */
5380                if (!((features & NETIF_F_GEN_CSUM) ||
5381                    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5382                            == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5383                        netdev_dbg(dev,
5384                                "Dropping NETIF_F_UFO since no checksum offload features.\n");
5385                        features &= ~NETIF_F_UFO;
5386                }
5387
5388                if (!(features & NETIF_F_SG)) {
5389                        netdev_dbg(dev,
5390                                "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5391                        features &= ~NETIF_F_UFO;
5392                }
5393        }
5394
5395        return features;
5396}
5397
5398int __netdev_update_features(struct net_device *dev)
5399{
5400        netdev_features_t features;
5401        int err = 0;
5402
5403        ASSERT_RTNL();
5404
5405        features = netdev_get_wanted_features(dev);
5406
5407        if (dev->netdev_ops->ndo_fix_features)
5408                features = dev->netdev_ops->ndo_fix_features(dev, features);
5409
5410        /* driver might be less strict about feature dependencies */
5411        features = netdev_fix_features(dev, features);
5412
5413        if (dev->features == features)
5414                return 0;
5415
5416        netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5417                &dev->features, &features);
5418
5419        if (dev->netdev_ops->ndo_set_features)
5420                err = dev->netdev_ops->ndo_set_features(dev, features);
5421
5422        if (unlikely(err < 0)) {
5423                netdev_err(dev,
5424                        "set_features() failed (%d); wanted %pNF, left %pNF\n",
5425                        err, &features, &dev->features);
5426                return -1;
5427        }
5428
5429        if (!err)
5430                dev->features = features;
5431
5432        return 1;
5433}
5434
5435/**
5436 *      netdev_update_features - recalculate device features
5437 *      @dev: the device to check
5438 *
5439 *      Recalculate the dev->features set and send notifications if it
5440 *      has changed. Should be called after driver- or hardware-dependent
5441 *      conditions that influence the features might have changed.
5442 */
5443void netdev_update_features(struct net_device *dev)
5444{
5445        if (__netdev_update_features(dev))
5446                netdev_features_change(dev);
5447}
5448EXPORT_SYMBOL(netdev_update_features);
5449
5450/**
5451 *      netdev_change_features - recalculate device features
5452 *      @dev: the device to check
5453 *
5454 *      Recalculate the dev->features set and send notifications even
5455 *      if it has not changed. Should be called instead of
5456 *      netdev_update_features() if dev->vlan_features might also
5457 *      have changed, so that the changes are propagated to stacked
5458 *      VLAN devices.
5459 */
5460void netdev_change_features(struct net_device *dev)
5461{
5462        __netdev_update_features(dev);
5463        netdev_features_change(dev);
5464}
5465EXPORT_SYMBOL(netdev_change_features);
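
/*
 * Illustrative sketch (editorial, not part of dev.c): the driver side of the
 * feature negotiation above. A hypothetical ndo_fix_features() callback drops
 * TSO whenever the made-up constraint "MTU above 1500" holds, and the driver
 * re-runs the negotiation from its ndo_change_mtu() by calling
 * netdev_update_features().
 */
static netdev_features_t __maybe_unused
example_fix_features(struct net_device *dev, netdev_features_t features)
{
        /* Hypothetical hardware limitation: no TSO with jumbo frames. */
        if (dev->mtu > 1500)
                features &= ~NETIF_F_ALL_TSO;

        return features;
}

static int __maybe_unused example_change_mtu(struct net_device *dev, int new_mtu)
{
        dev->mtu = new_mtu;

        /* Re-evaluate features now that a dependent condition changed. */
        netdev_update_features(dev);
        return 0;
}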
5466
5467/**
5468 *      netif_stacked_transfer_operstate -      transfer operstate
5469 *      @rootdev: the root or lower level device to transfer state from
5470 *      @dev: the device to transfer operstate to
5471 *
5472 *      Transfer operational state from root to device. This is normally
5473 *      called when a stacking relationship exists between the root
5474 *      device and the device (a leaf device).
5475 */
5476void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5477                                        struct net_device *dev)
5478{
5479        if (rootdev->operstate == IF_OPER_DORMANT)
5480                netif_dormant_on(dev);
5481        else
5482                netif_dormant_off(dev);
5483
5484        if (netif_carrier_ok(rootdev)) {
5485                if (!netif_carrier_ok(dev))
5486                        netif_carrier_on(dev);
5487        } else {
5488                if (netif_carrier_ok(dev))
5489                        netif_carrier_off(dev);
5490        }
5491}
5492EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5493
5494#ifdef CONFIG_RPS
5495static int netif_alloc_rx_queues(struct net_device *dev)
5496{
5497        unsigned int i, count = dev->num_rx_queues;
5498        struct netdev_rx_queue *rx;
5499
5500        BUG_ON(count < 1);
5501
5502        rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5503        if (!rx) {
5504                pr_err("netdev: Unable to allocate %u rx queues\n", count);
5505                return -ENOMEM;
5506        }
5507        dev->_rx = rx;
5508
5509        for (i = 0; i < count; i++)
5510                rx[i].dev = dev;
5511        return 0;
5512}
5513#endif
5514
5515static void netdev_init_one_queue(struct net_device *dev,
5516                                  struct netdev_queue *queue, void *_unused)
5517{
5518        /* Initialize queue lock */
5519        spin_lock_init(&queue->_xmit_lock);
5520        netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5521        queue->xmit_lock_owner = -1;
5522        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5523        queue->dev = dev;
5524#ifdef CONFIG_BQL
5525        dql_init(&queue->dql, HZ);
5526#endif
5527}
5528
5529static int netif_alloc_netdev_queues(struct net_device *dev)
5530{
5531        unsigned int count = dev->num_tx_queues;
5532        struct netdev_queue *tx;
5533
5534        BUG_ON(count < 1);
5535
5536        tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5537        if (!tx) {
5538                pr_err("netdev: Unable to allocate %u tx queues\n", count);
5539                return -ENOMEM;
5540        }
5541        dev->_tx = tx;
5542
5543        netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5544        spin_lock_init(&dev->tx_global_lock);
5545
5546        return 0;
5547}
5548
5549/**
5550 *      register_netdevice      - register a network device
5551 *      @dev: device to register
5552 *
5553 *      Take a completed network device structure and add it to the kernel
5554 *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5555 *      chain. 0 is returned on success. A negative errno code is returned
5556 *      on a failure to set up the device, or if the name is a duplicate.
5557 *
5558 *      Callers must hold the rtnl semaphore. You may want
5559 *      register_netdev() instead of this.
5560 *
5561 *      BUGS:
5562 *      The locking appears insufficient to guarantee two parallel registers
5563 *      will not get the same name.
5564 */
5565
5566int register_netdevice(struct net_device *dev)
5567{
5568        int ret;
5569        struct net *net = dev_net(dev);
5570
5571        BUG_ON(dev_boot_phase);
5572        ASSERT_RTNL();
5573
5574        might_sleep();
5575
5576        /* When net_devices are persistent, this will be fatal. */
5577        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5578        BUG_ON(!net);
5579
5580        spin_lock_init(&dev->addr_list_lock);
5581        netdev_set_addr_lockdep_class(dev);
5582
5583        dev->iflink = -1;
5584
5585        ret = dev_get_valid_name(dev, dev->name);
5586        if (ret < 0)
5587                goto out;
5588
5589        /* Init, if this function is available */
5590        if (dev->netdev_ops->ndo_init) {
5591                ret = dev->netdev_ops->ndo_init(dev);
5592                if (ret) {
5593                        if (ret > 0)
5594                                ret = -EIO;
5595                        goto out;
5596                }
5597        }
5598
5599        dev->ifindex = dev_new_index(net);
5600        if (dev->iflink == -1)
5601                dev->iflink = dev->ifindex;
5602
5603        /* Transfer changeable features to wanted_features and enable
5604         * software offloads (GSO and GRO).
5605         */
5606        dev->hw_features |= NETIF_F_SOFT_FEATURES;
5607        dev->features |= NETIF_F_SOFT_FEATURES;
5608        dev->wanted_features = dev->features & dev->hw_features;
5609
5610        /* Turn on no cache copy if HW is doing checksum */
5611        if (!(dev->flags & IFF_LOOPBACK)) {
5612                dev->hw_features |= NETIF_F_NOCACHE_COPY;
5613                if (dev->features & NETIF_F_ALL_CSUM) {
5614                        dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5615                        dev->features |= NETIF_F_NOCACHE_COPY;
5616                }
5617        }
5618
5619        /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5620         */
5621        dev->vlan_features |= NETIF_F_HIGHDMA;
5622
5623        ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5624        ret = notifier_to_errno(ret);
5625        if (ret)
5626                goto err_uninit;
5627
5628        ret = netdev_register_kobject(dev);
5629        if (ret)
5630                goto err_uninit;
5631        dev->reg_state = NETREG_REGISTERED;
5632
5633        __netdev_update_features(dev);
5634
5635        /*
5636         *      Default initial state at registration is that the
5637         *      device is present.
5638         */
5639
5640        set_bit(__LINK_STATE_PRESENT, &dev->state);
5641
5642        dev_init_scheduler(dev);
5643        dev_hold(dev);
5644        list_netdevice(dev);
5645        add_device_randomness(dev->dev_addr, dev->addr_len);
5646
5647        /* Notify protocols that a new device appeared. */
5648        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5649        ret = notifier_to_errno(ret);
5650        if (ret) {
5651                rollback_registered(dev);
5652                dev->reg_state = NETREG_UNREGISTERED;
5653        }
5654        /*
5655         *      Prevent userspace races by waiting until the network
5656         *      device is fully set up before sending notifications.
5657         */
5658        if (!dev->rtnl_link_ops ||
5659            dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5660                rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5661
5662out:
5663        return ret;
5664
5665err_uninit:
5666        if (dev->netdev_ops->ndo_uninit)
5667                dev->netdev_ops->ndo_uninit(dev);
5668        goto out;
5669}
5670EXPORT_SYMBOL(register_netdevice);
5671
5672/**
5673 *      init_dummy_netdev       - init a dummy network device for NAPI
5674 *      @dev: device to init
5675 *
5676 *      This takes a network device structure and initializes the minimum
5677 *      number of fields so it can be used to schedule NAPI polls without
5678 *      registering a full-blown interface. This is to be used by drivers
5679 *      that need to tie several hardware interfaces to a single NAPI
5680 *      poll scheduler due to HW limitations.
5681 */
5682int init_dummy_netdev(struct net_device *dev)
5683{
5684        /* Clear everything. Note we don't initialize spinlocks
5685         * as they aren't supposed to be taken by any of the
5686         * NAPI code and this dummy netdev is supposed to be
5687         * only ever used for NAPI polls.
5688         */
5689        memset(dev, 0, sizeof(struct net_device));
5690
5691        /* make sure we BUG if trying to hit standard
5692         * register/unregister code path
5693         */
5694        dev->reg_state = NETREG_DUMMY;
5695
5696        /* NAPI wants this */
5697        INIT_LIST_HEAD(&dev->napi_list);
5698
5699        /* a dummy interface is started by default */
5700        set_bit(__LINK_STATE_PRESENT, &dev->state);
5701        set_bit(__LINK_STATE_START, &dev->state);
5702
5703        /* Note: We don't allocate pcpu_refcnt for dummy devices,
5704         * because users of this 'device' don't need to change
5705         * its refcount.
5706         */
5707
5708        return 0;
5709}
5710EXPORT_SYMBOL_GPL(init_dummy_netdev);
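
/*
 * Illustrative sketch (editorial, not part of dev.c): the kind of use
 * init_dummy_netdev() is meant for. A hypothetical multi-port adapter hangs
 * a single NAPI context off a dummy, never-registered netdev so that all of
 * its hardware interfaces can share one poll scheduler.
 */
struct example_adapter {
        struct net_device napi_dev;     /* dummy, never registered */
        struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
        /* A real driver would service its hardware rings here. */
        napi_complete(napi);
        return 0;
}

static void __maybe_unused example_init_shared_napi(struct example_adapter *ad)
{
        init_dummy_netdev(&ad->napi_dev);
        netif_napi_add(&ad->napi_dev, &ad->napi, example_poll, 64);
        napi_enable(&ad->napi);
}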
5711
5712
5713/**
5714 *      register_netdev - register a network device
5715 *      @dev: device to register
5716 *
5717 *      Take a completed network device structure and add it to the kernel
5718 *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5719 *      chain. 0 is returned on success. A negative errno code is returned
5720 *      on a failure to set up the device, or if the name is a duplicate.
5721 *
5722 *      This is a wrapper around register_netdevice that takes the rtnl semaphore
5723 *      and expands the device name if you passed a format string to
5724 *      alloc_netdev.
5725 */
5726int register_netdev(struct net_device *dev)
5727{
5728        int err;
5729
5730        rtnl_lock();
5731        err = register_netdevice(dev);
5732        rtnl_unlock();
5733        return err;
5734}
5735EXPORT_SYMBOL(register_netdev);
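
/*
 * Illustrative sketch (editorial, not part of dev.c): the usual driver
 * registration pattern built on register_netdev(). All of the example_*
 * names and the "example%d" name template are hypothetical; a real driver
 * would also fill in dev->dev_addr, features and so on.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);             /* drop the packet; this is only a sketch */
        return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_start_xmit = example_start_xmit,
};

static void example_setup(struct net_device *dev)
{
        dev->netdev_ops   = &example_netdev_ops;
        dev->mtu          = 1500;
        dev->tx_queue_len = 1000;
        dev->flags        = IFF_NOARP;
}

static int __maybe_unused example_create(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_netdev(0, "example%d", example_setup);
        if (!dev)
                return -ENOMEM;

        err = register_netdev(dev);     /* takes rtnl_lock() internally */
        if (err)
                free_netdev(dev);
        return err;
}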
5736
5737int netdev_refcnt_read(const struct net_device *dev)
5738{
5739        int i, refcnt = 0;
5740
5741        for_each_possible_cpu(i)
5742                refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5743        return refcnt;
5744}
5745EXPORT_SYMBOL(netdev_refcnt_read);
5746
5747/**
5748 * netdev_wait_allrefs - wait until all references are gone.
5749 * @dev: target net_device
5750 *
5751 * This is called when unregistering network devices.
5752 *
5753 * Any protocol or device that holds a reference should register
5754 * for netdevice notifications, and clean up and put back the
5755 * reference if it receives an UNREGISTER event (a sketch follows
5756 * this function). We can get stuck here if buggy protocols don't
5757 * correctly call dev_put.
5758 */
5759static void netdev_wait_allrefs(struct net_device *dev)
5760{
5761        unsigned long rebroadcast_time, warning_time;
5762        int refcnt;
5763
5764        linkwatch_forget_dev(dev);
5765
5766        rebroadcast_time = warning_time = jiffies;
5767        refcnt = netdev_refcnt_read(dev);
5768
5769        while (refcnt != 0) {
5770                if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5771                        rtnl_lock();
5772
5773                        /* Rebroadcast unregister notification */
5774                        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5775                        /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5776                         * should have already handled it the first time */
5777
5778                        if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5779                                     &dev->state)) {
5780                                /* We must not have linkwatch events
5781                                 * pending on unregister. If this
5782                                 * happens, we simply run the queue
5783                                 * unscheduled, resulting in a noop
5784                                 * for this device.
5785                                 */
5786                                linkwatch_run_queue();
5787                        }
5788
5789                        __rtnl_unlock();
5790
5791                        rebroadcast_time = jiffies;
5792                }
5793
5794                msleep(250);
5795
5796                refcnt = netdev_refcnt_read(dev);
5797
5798                if (time_after(jiffies, warning_time + 10 * HZ)) {
5799                        pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5800                                 dev->name, refcnt);
5801                        warning_time = jiffies;
5802                }
5803        }
5804}
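
/*
 * Illustrative sketch (editorial, not part of dev.c) of what the comment
 * above asks of reference holders: register a netdevice notifier and drop
 * the dev_hold() reference on NETDEV_UNREGISTER so netdev_wait_allrefs()
 * can make progress. All example_* names are hypothetical, and the
 * notifier would be hooked up with register_netdevice_notifier().
 */
static struct net_device *example_held_dev;     /* taken with dev_hold() elsewhere */

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UNREGISTER && dev == example_held_dev) {
                dev_put(example_held_dev);
                example_held_dev = NULL;
        }
        return NOTIFY_DONE;
}

static struct notifier_block __maybe_unused example_netdev_notifier = {
        .notifier_call = example_netdev_event,
};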
5805
5806/* The sequence is:
5807 *
5808 *      rtnl_lock();
5809 *      ...
5810 *      register_netdevice(x1);
5811 *      register_netdevice(x2);
5812 *      ...
5813 *      unregister_netdevice(y1);
5814 *      unregister_netdevice(y2);
5815 *      ...
5816 *      rtnl_unlock();
5817 *      free_netdev(y1);
5818 *      free_netdev(y2);
5819 *
5820 * We are invoked by rtnl_unlock().
5821 * This allows us to deal with problems:
5822 * 1) We can delete sysfs objects which invoke hotplug
5823 *    without deadlocking with linkwatch via keventd.
5824 * 2) Since we run with the RTNL semaphore not held, we can sleep
5825 *    safely in order to wait for the netdev refcnt to drop to zero.
5826 *
5827 * We must not return until all unregister events added during
5828 * the interval the lock was held have been completed.
5829 */
5830void netdev_run_todo(void)
5831{
5832        struct list_head list;
5833
5834        /* Snapshot list, allow later requests */
5835        list_replace_init(&net_todo_list, &list);
5836
5837        __rtnl_unlock();
5838
5839        /* Wait for rcu callbacks to finish before attempting to drain
5840         * the device list.  This usually avoids a 250ms wait.
5841         */
5842        if (!list_empty(&list))
5843                rcu_barrier();
5844
5845        while (!list_empty(&list)) {
5846                struct net_device *dev
5847                        = list_first_entry(&list, struct net_device, todo_list);
5848                list_del(&dev->todo_list);
5849
5850                if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5851                        pr_err("network todo '%s' but state %d\n",
5852                               dev->name, dev->reg_state);
5853