linux/net/core/dev.c
   1/*
   2 *      NET3    Protocol independent device support routines.
   3 *
   4 *              This program is free software; you can redistribute it and/or
   5 *              modify it under the terms of the GNU General Public License
   6 *              as published by the Free Software Foundation; either version
   7 *              2 of the License, or (at your option) any later version.
   8 *
   9 *      Derived from the non IP parts of dev.c 1.0.19
  10 *              Authors:        Ross Biro
  11 *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
  13 *
  14 *      Additional Authors:
  15 *              Florian la Roche <rzsfl@rz.uni-sb.de>
  16 *              Alan Cox <gw4pts@gw4pts.ampr.org>
  17 *              David Hinds <dahinds@users.sourceforge.net>
  18 *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
  19 *              Adam Sulmicki <adam@cfar.umd.edu>
  20 *              Pekka Riikonen <priikone@poesidon.pspt.fi>
  21 *
  22 *      Changes:
  23 *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
  24 *                                      to 2 if register_netdev gets called
  25 *                                      before net_dev_init & also removed a
  26 *                                      few lines of code in the process.
  27 *              Alan Cox        :       device private ioctl copies fields back.
  28 *              Alan Cox        :       Transmit queue code does relevant
  29 *                                      stunts to keep the queue safe.
  30 *              Alan Cox        :       Fixed double lock.
  31 *              Alan Cox        :       Fixed promisc NULL pointer trap
  32 *              ????????        :       Support the full private ioctl range
  33 *              Alan Cox        :       Moved ioctl permission check into
  34 *                                      drivers
  35 *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
  36 *              Alan Cox        :       100 backlog just doesn't cut it when
  37 *                                      you start doing multicast video 8)
  38 *              Alan Cox        :       Rewrote net_bh and list manager.
  39 *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
  40 *              Alan Cox        :       Took out transmit every packet pass
  41 *                                      Saved a few bytes in the ioctl handler
  42 *              Alan Cox        :       Network driver sets packet type before
  43 *                                      calling netif_rx. Saves a function
  44 *                                      call a packet.
  45 *              Alan Cox        :       Hashed net_bh()
  46 *              Richard Kooijman:       Timestamp fixes.
  47 *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
  48 *              Alan Cox        :       Device lock protection.
  49 *              Alan Cox        :       Fixed nasty side effect of device close
  50 *                                      changes.
  51 *              Rudi Cilibrasi  :       Pass the right thing to
  52 *                                      set_mac_address()
  53 *              Dave Miller     :       32bit quantity for the device lock to
  54 *                                      make it work out on a Sparc.
  55 *              Bjorn Ekwall    :       Added KERNELD hack.
  56 *              Alan Cox        :       Cleaned up the backlog initialise.
  57 *              Craig Metz      :       SIOCGIFCONF fix if space for under
  58 *                                      1 device.
  59 *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
  60 *                                      is no device open function.
  61 *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
  62 *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
  63 *              Cyrus Durgin    :       Cleaned for KMOD
  64 *              Adam Sulmicki   :       Bug Fix : Network Device Unload
  65 *                                      A network device unload needs to purge
  66 *                                      the backlog queue.
  67 *      Paul Rusty Russell      :       SIOCSIFNAME
  68 *              Pekka Riikonen  :       Netdev boot-time settings code
  69 *              Andrew Morton   :       Make unregister_netdevice wait
  70 *                                      indefinitely on dev->refcnt
  71 *              J Hadi Salim    :       - Backlog queue sampling
  72 *                                      - netif_rx() feedback
  73 */
  74
  75#include <asm/uaccess.h>
  76#include <linux/bitops.h>
  77#include <linux/capability.h>
  78#include <linux/cpu.h>
  79#include <linux/types.h>
  80#include <linux/kernel.h>
  81#include <linux/hash.h>
  82#include <linux/slab.h>
  83#include <linux/sched.h>
  84#include <linux/mutex.h>
  85#include <linux/string.h>
  86#include <linux/mm.h>
  87#include <linux/socket.h>
  88#include <linux/sockios.h>
  89#include <linux/errno.h>
  90#include <linux/interrupt.h>
  91#include <linux/if_ether.h>
  92#include <linux/netdevice.h>
  93#include <linux/etherdevice.h>
  94#include <linux/ethtool.h>
  95#include <linux/notifier.h>
  96#include <linux/skbuff.h>
  97#include <net/net_namespace.h>
  98#include <net/sock.h>
  99#include <net/busy_poll.h>
 100#include <linux/rtnetlink.h>
 101#include <linux/stat.h>
 102#include <net/dst.h>
 103#include <net/dst_metadata.h>
 104#include <net/pkt_sched.h>
 105#include <net/checksum.h>
 106#include <net/xfrm.h>
 107#include <linux/highmem.h>
 108#include <linux/init.h>
 109#include <linux/module.h>
 110#include <linux/netpoll.h>
 111#include <linux/rcupdate.h>
 112#include <linux/delay.h>
 113#include <net/iw_handler.h>
 114#include <asm/current.h>
 115#include <linux/audit.h>
 116#include <linux/dmaengine.h>
 117#include <linux/err.h>
 118#include <linux/ctype.h>
 119#include <linux/if_arp.h>
 120#include <linux/if_vlan.h>
 121#include <linux/ip.h>
 122#include <net/ip.h>
 123#include <net/mpls.h>
 124#include <linux/ipv6.h>
 125#include <linux/in.h>
 126#include <linux/jhash.h>
 127#include <linux/random.h>
 128#include <trace/events/napi.h>
 129#include <trace/events/net.h>
 130#include <trace/events/skb.h>
 131#include <linux/pci.h>
 132#include <linux/inetdevice.h>
 133#include <linux/cpu_rmap.h>
 134#include <linux/static_key.h>
 135#include <linux/hashtable.h>
 136#include <linux/vmalloc.h>
 137#include <linux/if_macvlan.h>
 138#include <linux/errqueue.h>
 139#include <linux/hrtimer.h>
 140#include <linux/netfilter_ingress.h>
 141#include <linux/sctp.h>
 142
 143#include "net-sysfs.h"
 144
 145/* Instead of increasing this, you should create a hash table. */
 146#define MAX_GRO_SKBS 8
 147
 148/* This should be increased if a protocol with a bigger head is added. */
 149#define GRO_MAX_HEAD (MAX_HEADER + 128)
 150
 151static DEFINE_SPINLOCK(ptype_lock);
 152static DEFINE_SPINLOCK(offload_lock);
 153struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 154struct list_head ptype_all __read_mostly;       /* Taps */
 155static struct list_head offload_base __read_mostly;
 156
 157static int netif_rx_internal(struct sk_buff *skb);
 158static int call_netdevice_notifiers_info(unsigned long val,
 159                                         struct net_device *dev,
 160                                         struct netdev_notifier_info *info);
 161
 162/*
 163 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 164 * semaphore.
 165 *
 166 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 167 *
 168 * Writers must hold the rtnl semaphore while they loop through the
 169 * dev_base_head list, and hold dev_base_lock for writing when they do the
 170 * actual updates.  This allows pure readers to access the list even
 171 * while a writer is preparing to update it.
 172 *
 173 * To put it another way, dev_base_lock is held for writing only to
 174 * protect against pure readers; the rtnl semaphore provides the
 175 * protection against other writers.
 176 *
 177 * See, for example usages, register_netdevice() and
 178 * unregister_netdevice(), which must be called with the rtnl
 179 * semaphore held.
 180 */
 181DEFINE_RWLOCK(dev_base_lock);
 182EXPORT_SYMBOL(dev_base_lock);
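/*
 * Example (illustrative sketch, not part of the original file): a pure
 * reader walking the device list only needs rcu_read_lock(); the
 * hypothetical helper below shows the pattern described above.
 *
 *      static int example_count_netdevs(struct net *net)
 *      {
 *              struct net_device *dev;
 *              int count = 0;
 *
 *              rcu_read_lock();
 *              for_each_netdev_rcu(net, dev)
 *                      count++;
 *              rcu_read_unlock();
 *              return count;
 *      }
 *
 * Writers instead run under rtnl_lock() and take dev_base_lock for
 * writing around the actual list update, as list_netdevice() below does.
 */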
 183
 184/* protects napi_hash addition/deletion and napi_gen_id */
 185static DEFINE_SPINLOCK(napi_hash_lock);
 186
 187static unsigned int napi_gen_id = NR_CPUS;
 188static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
 189
 190static seqcount_t devnet_rename_seq;
 191
 192static inline void dev_base_seq_inc(struct net *net)
 193{
 194        while (++net->dev_base_seq == 0);
 195}
 196
 197static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
 198{
 199        unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 200
 201        return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
 202}
 203
 204static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 205{
 206        return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 207}
 208
 209static inline void rps_lock(struct softnet_data *sd)
 210{
 211#ifdef CONFIG_RPS
 212        spin_lock(&sd->input_pkt_queue.lock);
 213#endif
 214}
 215
 216static inline void rps_unlock(struct softnet_data *sd)
 217{
 218#ifdef CONFIG_RPS
 219        spin_unlock(&sd->input_pkt_queue.lock);
 220#endif
 221}
 222
 223/* Device list insertion */
 224static void list_netdevice(struct net_device *dev)
 225{
 226        struct net *net = dev_net(dev);
 227
 228        ASSERT_RTNL();
 229
 230        write_lock_bh(&dev_base_lock);
 231        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
 232        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 233        hlist_add_head_rcu(&dev->index_hlist,
 234                           dev_index_hash(net, dev->ifindex));
 235        write_unlock_bh(&dev_base_lock);
 236
 237        dev_base_seq_inc(net);
 238}
 239
 240/* Device list removal
  241 * caller must respect an RCU grace period before freeing/reusing dev
 242 */
 243static void unlist_netdevice(struct net_device *dev)
 244{
 245        ASSERT_RTNL();
 246
 247        /* Unlink dev from the device chain */
 248        write_lock_bh(&dev_base_lock);
 249        list_del_rcu(&dev->dev_list);
 250        hlist_del_rcu(&dev->name_hlist);
 251        hlist_del_rcu(&dev->index_hlist);
 252        write_unlock_bh(&dev_base_lock);
 253
 254        dev_base_seq_inc(dev_net(dev));
 255}
 256
 257/*
 258 *      Our notifier list
 259 */
 260
 261static RAW_NOTIFIER_HEAD(netdev_chain);
 262
 263/*
 264 *      Device drivers call our routines to queue packets here. We empty the
 265 *      queue in the local softnet handler.
 266 */
 267
 268DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 269EXPORT_PER_CPU_SYMBOL(softnet_data);
 270
 271#ifdef CONFIG_LOCKDEP
 272/*
 273 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 274 * according to dev->type
 275 */
 276static const unsigned short netdev_lock_type[] =
 277        {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
 278         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
 279         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
 280         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
 281         ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
 282         ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
 283         ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
 284         ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
 285         ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
 286         ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
 287         ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
 288         ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
 289         ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
 290         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
 291         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
 292
 293static const char *const netdev_lock_name[] =
 294        {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
 295         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
 296         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
 297         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
 298         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
 299         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
 300         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
 301         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
 302         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
 303         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
 304         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
 305         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
 306         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
 307         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
 308         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
 309
 310static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 311static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
 312
 313static inline unsigned short netdev_lock_pos(unsigned short dev_type)
 314{
 315        int i;
 316
 317        for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
 318                if (netdev_lock_type[i] == dev_type)
 319                        return i;
 320        /* the last key is used by default */
 321        return ARRAY_SIZE(netdev_lock_type) - 1;
 322}
 323
 324static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 325                                                 unsigned short dev_type)
 326{
 327        int i;
 328
 329        i = netdev_lock_pos(dev_type);
 330        lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
 331                                   netdev_lock_name[i]);
 332}
 333
 334static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 335{
 336        int i;
 337
 338        i = netdev_lock_pos(dev->type);
 339        lockdep_set_class_and_name(&dev->addr_list_lock,
 340                                   &netdev_addr_lock_key[i],
 341                                   netdev_lock_name[i]);
 342}
 343#else
 344static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
 345                                                 unsigned short dev_type)
 346{
 347}
 348static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 349{
 350}
 351#endif
 352
 353/*******************************************************************************
 354
 355                Protocol management and registration routines
 356
 357*******************************************************************************/
 358
 359/*
 360 *      Add a protocol ID to the list. Now that the input handler is
 361 *      smarter we can dispense with all the messy stuff that used to be
 362 *      here.
 363 *
 364 *      BEWARE!!! Protocol handlers, mangling input packets,
 365 *      MUST BE last in hash buckets and checking protocol handlers
 366 *      MUST start from promiscuous ptype_all chain in net_bh.
 367 *      It is true now, do not change it.
  368 *      Explanation follows: if a protocol handler that mangles the
  369 *      packet were first on the list, it could not tell that the
  370 *      packet is cloned and should be copied-on-write, so it would
  371 *      modify it and subsequent readers would get a broken packet.
 372 *                                                      --ANK (980803)
 373 */
 374
 375static inline struct list_head *ptype_head(const struct packet_type *pt)
 376{
 377        if (pt->type == htons(ETH_P_ALL))
 378                return pt->dev ? &pt->dev->ptype_all : &ptype_all;
 379        else
 380                return pt->dev ? &pt->dev->ptype_specific :
 381                                 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 382}
 383
 384/**
 385 *      dev_add_pack - add packet handler
 386 *      @pt: packet type declaration
 387 *
 388 *      Add a protocol handler to the networking stack. The passed &packet_type
 389 *      is linked into kernel lists and may not be freed until it has been
 390 *      removed from the kernel lists.
 391 *
  392 *      This call does not sleep, therefore it cannot guarantee that
  393 *      all CPUs that are in the middle of receiving packets will see
  394 *      the new packet type (until the next received packet).
 395 */
 396
 397void dev_add_pack(struct packet_type *pt)
 398{
 399        struct list_head *head = ptype_head(pt);
 400
 401        spin_lock(&ptype_lock);
 402        list_add_rcu(&pt->list, head);
 403        spin_unlock(&ptype_lock);
 404}
 405EXPORT_SYMBOL(dev_add_pack);
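/*
 * Example (illustrative sketch, hypothetical names): registering a
 * receive handler for a locally administered ethertype. The handler and
 * packet_type below are examples only; the structure must stay alive
 * until dev_remove_pack() has returned.
 *
 *      static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *                             struct packet_type *pt,
 *                             struct net_device *orig_dev)
 *      {
 *              consume_skb(skb);
 *              return NET_RX_SUCCESS;
 *      }
 *
 *      static struct packet_type example_ptype __read_mostly = {
 *              .type   = htons(0x88B5),
 *              .func   = example_rcv,
 *      };
 *
 *      dev_add_pack(&example_ptype);
 *      ...
 *      dev_remove_pack(&example_ptype);
 */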
 406
 407/**
 408 *      __dev_remove_pack        - remove packet handler
 409 *      @pt: packet type declaration
 410 *
 411 *      Remove a protocol handler that was previously added to the kernel
 412 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 413 *      from the kernel lists and can be freed or reused once this function
 414 *      returns.
 415 *
 416 *      The packet type might still be in use by receivers
  417 *      and must not be freed until after all the CPUs have gone
 418 *      through a quiescent state.
 419 */
 420void __dev_remove_pack(struct packet_type *pt)
 421{
 422        struct list_head *head = ptype_head(pt);
 423        struct packet_type *pt1;
 424
 425        spin_lock(&ptype_lock);
 426
 427        list_for_each_entry(pt1, head, list) {
 428                if (pt == pt1) {
 429                        list_del_rcu(&pt->list);
 430                        goto out;
 431                }
 432        }
 433
 434        pr_warn("dev_remove_pack: %p not found\n", pt);
 435out:
 436        spin_unlock(&ptype_lock);
 437}
 438EXPORT_SYMBOL(__dev_remove_pack);
 439
 440/**
 441 *      dev_remove_pack  - remove packet handler
 442 *      @pt: packet type declaration
 443 *
 444 *      Remove a protocol handler that was previously added to the kernel
 445 *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
 446 *      from the kernel lists and can be freed or reused once this function
 447 *      returns.
 448 *
 449 *      This call sleeps to guarantee that no CPU is looking at the packet
 450 *      type after return.
 451 */
 452void dev_remove_pack(struct packet_type *pt)
 453{
 454        __dev_remove_pack(pt);
 455
 456        synchronize_net();
 457}
 458EXPORT_SYMBOL(dev_remove_pack);
 459
 460
 461/**
 462 *      dev_add_offload - register offload handlers
 463 *      @po: protocol offload declaration
 464 *
 465 *      Add protocol offload handlers to the networking stack. The passed
 466 *      &proto_offload is linked into kernel lists and may not be freed until
 467 *      it has been removed from the kernel lists.
 468 *
  469 *      This call does not sleep, therefore it cannot guarantee that
  470 *      all CPUs that are in the middle of receiving packets will see
  471 *      the new offload handlers (until the next received packet).
 472 */
 473void dev_add_offload(struct packet_offload *po)
 474{
 475        struct packet_offload *elem;
 476
 477        spin_lock(&offload_lock);
 478        list_for_each_entry(elem, &offload_base, list) {
 479                if (po->priority < elem->priority)
 480                        break;
 481        }
 482        list_add_rcu(&po->list, elem->list.prev);
 483        spin_unlock(&offload_lock);
 484}
 485EXPORT_SYMBOL(dev_add_offload);
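/*
 * Example (illustrative sketch, hypothetical callback names): a protocol
 * that participates in GRO/GSO registers its callbacks through a
 * packet_offload, much as af_inet does for ETH_P_IP.
 *
 *      static struct packet_offload example_offload __read_mostly = {
 *              .type = htons(ETH_P_IP),
 *              .callbacks = {
 *                      .gso_segment    = example_gso_segment,
 *                      .gro_receive    = example_gro_receive,
 *                      .gro_complete   = example_gro_complete,
 *              },
 *      };
 *
 *      dev_add_offload(&example_offload);
 */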
 486
 487/**
 488 *      __dev_remove_offload     - remove offload handler
 489 *      @po: packet offload declaration
 490 *
 491 *      Remove a protocol offload handler that was previously added to the
 492 *      kernel offload handlers by dev_add_offload(). The passed &offload_type
 493 *      is removed from the kernel lists and can be freed or reused once this
 494 *      function returns.
 495 *
 496 *      The packet type might still be in use by receivers
  497 *      and must not be freed until after all the CPUs have gone
 498 *      through a quiescent state.
 499 */
 500static void __dev_remove_offload(struct packet_offload *po)
 501{
 502        struct list_head *head = &offload_base;
 503        struct packet_offload *po1;
 504
 505        spin_lock(&offload_lock);
 506
 507        list_for_each_entry(po1, head, list) {
 508                if (po == po1) {
 509                        list_del_rcu(&po->list);
 510                        goto out;
 511                }
 512        }
 513
 514        pr_warn("dev_remove_offload: %p not found\n", po);
 515out:
 516        spin_unlock(&offload_lock);
 517}
 518
 519/**
 520 *      dev_remove_offload       - remove packet offload handler
 521 *      @po: packet offload declaration
 522 *
 523 *      Remove a packet offload handler that was previously added to the kernel
 524 *      offload handlers by dev_add_offload(). The passed &offload_type is
 525 *      removed from the kernel lists and can be freed or reused once this
 526 *      function returns.
 527 *
 528 *      This call sleeps to guarantee that no CPU is looking at the packet
 529 *      type after return.
 530 */
 531void dev_remove_offload(struct packet_offload *po)
 532{
 533        __dev_remove_offload(po);
 534
 535        synchronize_net();
 536}
 537EXPORT_SYMBOL(dev_remove_offload);
 538
 539/******************************************************************************
 540
 541                      Device Boot-time Settings Routines
 542
 543*******************************************************************************/
 544
 545/* Boot time configuration table */
 546static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
 547
 548/**
 549 *      netdev_boot_setup_add   - add new setup entry
 550 *      @name: name of the device
 551 *      @map: configured settings for the device
 552 *
  553 *      Adds a new setup entry to the dev_boot_setup list.  The function
  554 *      returns 0 on error and 1 on success.  This is a generic routine
  555 *      for all netdevices.
 556 */
 557static int netdev_boot_setup_add(char *name, struct ifmap *map)
 558{
 559        struct netdev_boot_setup *s;
 560        int i;
 561
 562        s = dev_boot_setup;
 563        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 564                if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
 565                        memset(s[i].name, 0, sizeof(s[i].name));
 566                        strlcpy(s[i].name, name, IFNAMSIZ);
 567                        memcpy(&s[i].map, map, sizeof(s[i].map));
 568                        break;
 569                }
 570        }
 571
 572        return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
 573}
 574
 575/**
 576 *      netdev_boot_setup_check - check boot time settings
 577 *      @dev: the netdevice
 578 *
 579 *      Check boot time settings for the device.
  580 *      Any settings found are copied into the device for use
  581 *      later during device probing.
  582 *      Returns 0 if no settings are found, 1 if they are.
 583 */
 584int netdev_boot_setup_check(struct net_device *dev)
 585{
 586        struct netdev_boot_setup *s = dev_boot_setup;
 587        int i;
 588
 589        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
 590                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
 591                    !strcmp(dev->name, s[i].name)) {
 592                        dev->irq        = s[i].map.irq;
 593                        dev->base_addr  = s[i].map.base_addr;
 594                        dev->mem_start  = s[i].map.mem_start;
 595                        dev->mem_end    = s[i].map.mem_end;
 596                        return 1;
 597                }
 598        }
 599        return 0;
 600}
 601EXPORT_SYMBOL(netdev_boot_setup_check);
 602
 603
 604/**
 605 *      netdev_boot_base        - get address from boot time settings
 606 *      @prefix: prefix for network device
 607 *      @unit: id for network device
 608 *
  609 *      Check boot time settings for the base address of the device.
  610 *      Any setting found is returned for use later during device
  611 *      probing.
  612 *      Returns 0 if no settings are found.
 613 */
 614unsigned long netdev_boot_base(const char *prefix, int unit)
 615{
 616        const struct netdev_boot_setup *s = dev_boot_setup;
 617        char name[IFNAMSIZ];
 618        int i;
 619
 620        sprintf(name, "%s%d", prefix, unit);
 621
 622        /*
 623         * If device already registered then return base of 1
 624         * to indicate not to probe for this interface
 625         */
 626        if (__dev_get_by_name(&init_net, name))
 627                return 1;
 628
 629        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
 630                if (!strcmp(name, s[i].name))
 631                        return s[i].map.base_addr;
 632        return 0;
 633}
 634
 635/*
 636 * Saves at boot time configured settings for any netdevice.
 637 */
 638int __init netdev_boot_setup(char *str)
 639{
 640        int ints[5];
 641        struct ifmap map;
 642
 643        str = get_options(str, ARRAY_SIZE(ints), ints);
 644        if (!str || !*str)
 645                return 0;
 646
 647        /* Save settings */
 648        memset(&map, 0, sizeof(map));
 649        if (ints[0] > 0)
 650                map.irq = ints[1];
 651        if (ints[0] > 1)
 652                map.base_addr = ints[2];
 653        if (ints[0] > 2)
 654                map.mem_start = ints[3];
 655        if (ints[0] > 3)
 656                map.mem_end = ints[4];
 657
 658        /* Add new entry to the list */
 659        return netdev_boot_setup_add(str, &map);
 660}
 661
 662__setup("netdev=", netdev_boot_setup);
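/*
 * Example (illustrative): with the "netdev=" parsing above, a kernel
 * command line entry such as
 *
 *      netdev=5,0x340,0,0,eth0
 *
 * records irq 5 and I/O base 0x340 for the device that will later
 * register as "eth0"; netdev_boot_setup_check() then copies those
 * values into the net_device during probing.
 */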
 663
 664/*******************************************************************************
 665
 666                            Device Interface Subroutines
 667
 668*******************************************************************************/
 669
 670/**
  671 *      dev_get_iflink  - get 'iflink' value of an interface
 672 *      @dev: targeted interface
 673 *
 674 *      Indicates the ifindex the interface is linked to.
 675 *      Physical interfaces have the same 'ifindex' and 'iflink' values.
 676 */
 677
 678int dev_get_iflink(const struct net_device *dev)
 679{
 680        if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
 681                return dev->netdev_ops->ndo_get_iflink(dev);
 682
 683        return dev->ifindex;
 684}
 685EXPORT_SYMBOL(dev_get_iflink);
 686
 687/**
 688 *      dev_fill_metadata_dst - Retrieve tunnel egress information.
 689 *      @dev: targeted interface
 690 *      @skb: The packet.
 691 *
  692 *      For better visibility of tunnel traffic, OVS needs to retrieve
  693 *      the egress tunnel information for a packet. The following API
  694 *      allows the caller to get this info.
 695 */
 696int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 697{
 698        struct ip_tunnel_info *info;
 699
 700        if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
 701                return -EINVAL;
 702
 703        info = skb_tunnel_info_unclone(skb);
 704        if (!info)
 705                return -ENOMEM;
 706        if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
 707                return -EINVAL;
 708
 709        return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
 710}
 711EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
 712
 713/**
 714 *      __dev_get_by_name       - find a device by its name
 715 *      @net: the applicable net namespace
 716 *      @name: name to find
 717 *
 718 *      Find an interface by name. Must be called under RTNL semaphore
 719 *      or @dev_base_lock. If the name is found a pointer to the device
 720 *      is returned. If the name is not found then %NULL is returned. The
 721 *      reference counters are not incremented so the caller must be
 722 *      careful with locks.
 723 */
 724
 725struct net_device *__dev_get_by_name(struct net *net, const char *name)
 726{
 727        struct net_device *dev;
 728        struct hlist_head *head = dev_name_hash(net, name);
 729
 730        hlist_for_each_entry(dev, head, name_hlist)
 731                if (!strncmp(dev->name, name, IFNAMSIZ))
 732                        return dev;
 733
 734        return NULL;
 735}
 736EXPORT_SYMBOL(__dev_get_by_name);
 737
 738/**
 739 *      dev_get_by_name_rcu     - find a device by its name
 740 *      @net: the applicable net namespace
 741 *      @name: name to find
 742 *
 743 *      Find an interface by name.
 744 *      If the name is found a pointer to the device is returned.
 745 *      If the name is not found then %NULL is returned.
 746 *      The reference counters are not incremented so the caller must be
 747 *      careful with locks. The caller must hold RCU lock.
 748 */
 749
 750struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
 751{
 752        struct net_device *dev;
 753        struct hlist_head *head = dev_name_hash(net, name);
 754
 755        hlist_for_each_entry_rcu(dev, head, name_hlist)
 756                if (!strncmp(dev->name, name, IFNAMSIZ))
 757                        return dev;
 758
 759        return NULL;
 760}
 761EXPORT_SYMBOL(dev_get_by_name_rcu);
 762
 763/**
 764 *      dev_get_by_name         - find a device by its name
 765 *      @net: the applicable net namespace
 766 *      @name: name to find
 767 *
 768 *      Find an interface by name. This can be called from any
 769 *      context and does its own locking. The returned handle has
 770 *      the usage count incremented and the caller must use dev_put() to
 771 *      release it when it is no longer needed. %NULL is returned if no
 772 *      matching device is found.
 773 */
 774
 775struct net_device *dev_get_by_name(struct net *net, const char *name)
 776{
 777        struct net_device *dev;
 778
 779        rcu_read_lock();
 780        dev = dev_get_by_name_rcu(net, name);
 781        if (dev)
 782                dev_hold(dev);
 783        rcu_read_unlock();
 784        return dev;
 785}
 786EXPORT_SYMBOL(dev_get_by_name);
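/*
 * Example (illustrative sketch): a caller holding no locks looks the
 * device up by name and drops the reference when it is done.
 *
 *      struct net_device *dev;
 *
 *      dev = dev_get_by_name(&init_net, "eth0");
 *      if (!dev)
 *              return -ENODEV;
 *      ...
 *      dev_put(dev);
 */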
 787
 788/**
 789 *      __dev_get_by_index - find a device by its ifindex
 790 *      @net: the applicable net namespace
 791 *      @ifindex: index of device
 792 *
 793 *      Search for an interface by index. Returns %NULL if the device
 794 *      is not found or a pointer to the device. The device has not
 795 *      had its reference counter increased so the caller must be careful
 796 *      about locking. The caller must hold either the RTNL semaphore
 797 *      or @dev_base_lock.
 798 */
 799
 800struct net_device *__dev_get_by_index(struct net *net, int ifindex)
 801{
 802        struct net_device *dev;
 803        struct hlist_head *head = dev_index_hash(net, ifindex);
 804
 805        hlist_for_each_entry(dev, head, index_hlist)
 806                if (dev->ifindex == ifindex)
 807                        return dev;
 808
 809        return NULL;
 810}
 811EXPORT_SYMBOL(__dev_get_by_index);
 812
 813/**
 814 *      dev_get_by_index_rcu - find a device by its ifindex
 815 *      @net: the applicable net namespace
 816 *      @ifindex: index of device
 817 *
 818 *      Search for an interface by index. Returns %NULL if the device
 819 *      is not found or a pointer to the device. The device has not
 820 *      had its reference counter increased so the caller must be careful
 821 *      about locking. The caller must hold RCU lock.
 822 */
 823
 824struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
 825{
 826        struct net_device *dev;
 827        struct hlist_head *head = dev_index_hash(net, ifindex);
 828
 829        hlist_for_each_entry_rcu(dev, head, index_hlist)
 830                if (dev->ifindex == ifindex)
 831                        return dev;
 832
 833        return NULL;
 834}
 835EXPORT_SYMBOL(dev_get_by_index_rcu);
 836
 837
 838/**
 839 *      dev_get_by_index - find a device by its ifindex
 840 *      @net: the applicable net namespace
 841 *      @ifindex: index of device
 842 *
 843 *      Search for an interface by index. Returns NULL if the device
 844 *      is not found or a pointer to the device. The device returned has
 845 *      had a reference added and the pointer is safe until the user calls
 846 *      dev_put to indicate they have finished with it.
 847 */
 848
 849struct net_device *dev_get_by_index(struct net *net, int ifindex)
 850{
 851        struct net_device *dev;
 852
 853        rcu_read_lock();
 854        dev = dev_get_by_index_rcu(net, ifindex);
 855        if (dev)
 856                dev_hold(dev);
 857        rcu_read_unlock();
 858        return dev;
 859}
 860EXPORT_SYMBOL(dev_get_by_index);
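/*
 * Example (illustrative sketch): resolving an ifindex to a device and
 * releasing the reference afterwards.
 *
 *      struct net_device *dev;
 *
 *      dev = dev_get_by_index(net, ifindex);
 *      if (!dev)
 *              return -ENODEV;
 *      ...
 *      dev_put(dev);
 */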
 861
 862/**
 863 *      netdev_get_name - get a netdevice name, knowing its ifindex.
 864 *      @net: network namespace
 865 *      @name: a pointer to the buffer where the name will be stored.
 866 *      @ifindex: the ifindex of the interface to get the name from.
 867 *
 868 *      The use of raw_seqcount_begin() and cond_resched() before
 869 *      retrying is required as we want to give the writers a chance
 870 *      to complete when CONFIG_PREEMPT is not set.
 871 */
 872int netdev_get_name(struct net *net, char *name, int ifindex)
 873{
 874        struct net_device *dev;
 875        unsigned int seq;
 876
 877retry:
 878        seq = raw_seqcount_begin(&devnet_rename_seq);
 879        rcu_read_lock();
 880        dev = dev_get_by_index_rcu(net, ifindex);
 881        if (!dev) {
 882                rcu_read_unlock();
 883                return -ENODEV;
 884        }
 885
 886        strcpy(name, dev->name);
 887        rcu_read_unlock();
 888        if (read_seqcount_retry(&devnet_rename_seq, seq)) {
 889                cond_resched();
 890                goto retry;
 891        }
 892
 893        return 0;
 894}
 895
 896/**
 897 *      dev_getbyhwaddr_rcu - find a device by its hardware address
 898 *      @net: the applicable net namespace
 899 *      @type: media type of device
 900 *      @ha: hardware address
 901 *
 902 *      Search for an interface by MAC address. Returns NULL if the device
 903 *      is not found or a pointer to the device.
 904 *      The caller must hold RCU or RTNL.
 905 *      The returned device has not had its ref count increased
 906 *      and the caller must therefore be careful about locking
 907 *
 908 */
 909
 910struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
 911                                       const char *ha)
 912{
 913        struct net_device *dev;
 914
 915        for_each_netdev_rcu(net, dev)
 916                if (dev->type == type &&
 917                    !memcmp(dev->dev_addr, ha, dev->addr_len))
 918                        return dev;
 919
 920        return NULL;
 921}
 922EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
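/*
 * Example (illustrative sketch): looking up an Ethernet device by its
 * MAC address under the RCU read lock, as the comment above requires.
 *
 *      static const char example_addr[ETH_ALEN] = {
 *              0x00, 0x11, 0x22, 0x33, 0x44, 0x55
 *      };
 *      struct net_device *dev;
 *
 *      rcu_read_lock();
 *      dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, example_addr);
 *      if (dev)
 *              netdev_info(dev, "matched hardware address\n");
 *      rcu_read_unlock();
 */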
 923
 924struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
 925{
 926        struct net_device *dev;
 927
 928        ASSERT_RTNL();
 929        for_each_netdev(net, dev)
 930                if (dev->type == type)
 931                        return dev;
 932
 933        return NULL;
 934}
 935EXPORT_SYMBOL(__dev_getfirstbyhwtype);
 936
 937struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
 938{
 939        struct net_device *dev, *ret = NULL;
 940
 941        rcu_read_lock();
 942        for_each_netdev_rcu(net, dev)
 943                if (dev->type == type) {
 944                        dev_hold(dev);
 945                        ret = dev;
 946                        break;
 947                }
 948        rcu_read_unlock();
 949        return ret;
 950}
 951EXPORT_SYMBOL(dev_getfirstbyhwtype);
 952
 953/**
 954 *      __dev_get_by_flags - find any device with given flags
 955 *      @net: the applicable net namespace
 956 *      @if_flags: IFF_* values
 957 *      @mask: bitmask of bits in if_flags to check
 958 *
 959 *      Search for any interface with the given flags. Returns NULL if a device
 960 *      is not found or a pointer to the device. Must be called inside
 961 *      rtnl_lock(), and result refcount is unchanged.
 962 */
 963
 964struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
 965                                      unsigned short mask)
 966{
 967        struct net_device *dev, *ret;
 968
 969        ASSERT_RTNL();
 970
 971        ret = NULL;
 972        for_each_netdev(net, dev) {
 973                if (((dev->flags ^ if_flags) & mask) == 0) {
 974                        ret = dev;
 975                        break;
 976                }
 977        }
 978        return ret;
 979}
 980EXPORT_SYMBOL(__dev_get_by_flags);
 981
 982/**
 983 *      dev_valid_name - check if name is okay for network device
 984 *      @name: name string
 985 *
  986 *      Network device names need to be valid file names
  987 *      to allow sysfs to work.  We also disallow any kind of
 988 *      whitespace.
 989 */
 990bool dev_valid_name(const char *name)
 991{
 992        if (*name == '\0')
 993                return false;
 994        if (strlen(name) >= IFNAMSIZ)
 995                return false;
 996        if (!strcmp(name, ".") || !strcmp(name, ".."))
 997                return false;
 998
 999        while (*name) {
1000                if (*name == '/' || *name == ':' || isspace(*name))
1001                        return false;
1002                name++;
1003        }
1004        return true;
1005}
1006EXPORT_SYMBOL(dev_valid_name);
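/*
 * Examples (illustrative): dev_valid_name("eth0") and
 * dev_valid_name("br-lan") return true, while "", ".", "..", any name
 * containing '/', ':' or whitespace, and any name of IFNAMSIZ
 * characters or more return false.
 */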
1007
1008/**
1009 *      __dev_alloc_name - allocate a name for a device
1010 *      @net: network namespace to allocate the device name in
1011 *      @name: name format string
1012 *      @buf:  scratch buffer and result name string
1013 *
 1014 *      Passed a format string - eg "lt%d" - it will try to find a suitable
1015 *      id. It scans list of devices to build up a free map, then chooses
1016 *      the first empty slot. The caller must hold the dev_base or rtnl lock
1017 *      while allocating the name and adding the device in order to avoid
1018 *      duplicates.
1019 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1020 *      Returns the number of the unit assigned or a negative errno code.
1021 */
1022
1023static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1024{
1025        int i = 0;
1026        const char *p;
1027        const int max_netdevices = 8*PAGE_SIZE;
1028        unsigned long *inuse;
1029        struct net_device *d;
1030
1031        p = strnchr(name, IFNAMSIZ-1, '%');
1032        if (p) {
1033                /*
1034                 * Verify the string as this thing may have come from
 1035                 * the user.  There must be exactly one "%d" and no other "%"
1036                 * characters.
1037                 */
1038                if (p[1] != 'd' || strchr(p + 2, '%'))
1039                        return -EINVAL;
1040
1041                /* Use one page as a bit array of possible slots */
1042                inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1043                if (!inuse)
1044                        return -ENOMEM;
1045
1046                for_each_netdev(net, d) {
1047                        if (!sscanf(d->name, name, &i))
1048                                continue;
1049                        if (i < 0 || i >= max_netdevices)
1050                                continue;
1051
1052                        /*  avoid cases where sscanf is not exact inverse of printf */
1053                        snprintf(buf, IFNAMSIZ, name, i);
1054                        if (!strncmp(buf, d->name, IFNAMSIZ))
1055                                set_bit(i, inuse);
1056                }
1057
1058                i = find_first_zero_bit(inuse, max_netdevices);
1059                free_page((unsigned long) inuse);
1060        }
1061
1062        if (buf != name)
1063                snprintf(buf, IFNAMSIZ, name, i);
1064        if (!__dev_get_by_name(net, buf))
1065                return i;
1066
1067        /* It is possible to run out of possible slots
1068         * when the name is long and there isn't enough space left
1069         * for the digits, or if all bits are used.
1070         */
1071        return -ENFILE;
1072}
1073
1074/**
1075 *      dev_alloc_name - allocate a name for a device
1076 *      @dev: device
1077 *      @name: name format string
1078 *
 1079 *      Passed a format string - eg "lt%d" - it will try to find a suitable
1080 *      id. It scans list of devices to build up a free map, then chooses
1081 *      the first empty slot. The caller must hold the dev_base or rtnl lock
1082 *      while allocating the name and adding the device in order to avoid
1083 *      duplicates.
1084 *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1085 *      Returns the number of the unit assigned or a negative errno code.
1086 */
1087
1088int dev_alloc_name(struct net_device *dev, const char *name)
1089{
1090        char buf[IFNAMSIZ];
1091        struct net *net;
1092        int ret;
1093
1094        BUG_ON(!dev_net(dev));
1095        net = dev_net(dev);
1096        ret = __dev_alloc_name(net, name, buf);
1097        if (ret >= 0)
1098                strlcpy(dev->name, buf, IFNAMSIZ);
1099        return ret;
1100}
1101EXPORT_SYMBOL(dev_alloc_name);
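/*
 * Example (illustrative sketch, hypothetical label): a driver that wants
 * automatic unit numbering passes a format string and, holding rtnl as
 * required above, lets the core pick the first free slot.
 *
 *      err = dev_alloc_name(dev, "dummy%d");
 *      if (err < 0)
 *              goto out_free;
 *      netdev_info(dev, "assigned unit %d\n", err);
 */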
1102
1103static int dev_alloc_name_ns(struct net *net,
1104                             struct net_device *dev,
1105                             const char *name)
1106{
1107        char buf[IFNAMSIZ];
1108        int ret;
1109
1110        ret = __dev_alloc_name(net, name, buf);
1111        if (ret >= 0)
1112                strlcpy(dev->name, buf, IFNAMSIZ);
1113        return ret;
1114}
1115
1116static int dev_get_valid_name(struct net *net,
1117                              struct net_device *dev,
1118                              const char *name)
1119{
1120        BUG_ON(!net);
1121
1122        if (!dev_valid_name(name))
1123                return -EINVAL;
1124
1125        if (strchr(name, '%'))
1126                return dev_alloc_name_ns(net, dev, name);
1127        else if (__dev_get_by_name(net, name))
1128                return -EEXIST;
1129        else if (dev->name != name)
1130                strlcpy(dev->name, name, IFNAMSIZ);
1131
1132        return 0;
1133}
1134
1135/**
1136 *      dev_change_name - change name of a device
1137 *      @dev: device
1138 *      @newname: name (or format string) must be at least IFNAMSIZ
1139 *
 1140 *      Change the name of a device. A format string such as "eth%d"
 1141 *      may be passed for wildcarding.
1142 */
1143int dev_change_name(struct net_device *dev, const char *newname)
1144{
1145        unsigned char old_assign_type;
1146        char oldname[IFNAMSIZ];
1147        int err = 0;
1148        int ret;
1149        struct net *net;
1150
1151        ASSERT_RTNL();
1152        BUG_ON(!dev_net(dev));
1153
1154        net = dev_net(dev);
1155        if (dev->flags & IFF_UP)
1156                return -EBUSY;
1157
1158        write_seqcount_begin(&devnet_rename_seq);
1159
1160        if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1161                write_seqcount_end(&devnet_rename_seq);
1162                return 0;
1163        }
1164
1165        memcpy(oldname, dev->name, IFNAMSIZ);
1166
1167        err = dev_get_valid_name(net, dev, newname);
1168        if (err < 0) {
1169                write_seqcount_end(&devnet_rename_seq);
1170                return err;
1171        }
1172
1173        if (oldname[0] && !strchr(oldname, '%'))
1174                netdev_info(dev, "renamed from %s\n", oldname);
1175
1176        old_assign_type = dev->name_assign_type;
1177        dev->name_assign_type = NET_NAME_RENAMED;
1178
1179rollback:
1180        ret = device_rename(&dev->dev, dev->name);
1181        if (ret) {
1182                memcpy(dev->name, oldname, IFNAMSIZ);
1183                dev->name_assign_type = old_assign_type;
1184                write_seqcount_end(&devnet_rename_seq);
1185                return ret;
1186        }
1187
1188        write_seqcount_end(&devnet_rename_seq);
1189
1190        netdev_adjacent_rename_links(dev, oldname);
1191
1192        write_lock_bh(&dev_base_lock);
1193        hlist_del_rcu(&dev->name_hlist);
1194        write_unlock_bh(&dev_base_lock);
1195
1196        synchronize_rcu();
1197
1198        write_lock_bh(&dev_base_lock);
1199        hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1200        write_unlock_bh(&dev_base_lock);
1201
1202        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1203        ret = notifier_to_errno(ret);
1204
1205        if (ret) {
1206                /* err >= 0 after dev_alloc_name() or stores the first errno */
1207                if (err >= 0) {
1208                        err = ret;
1209                        write_seqcount_begin(&devnet_rename_seq);
1210                        memcpy(dev->name, oldname, IFNAMSIZ);
1211                        memcpy(oldname, newname, IFNAMSIZ);
1212                        dev->name_assign_type = old_assign_type;
1213                        old_assign_type = NET_NAME_RENAMED;
1214                        goto rollback;
1215                } else {
1216                        pr_err("%s: name change rollback failed: %d\n",
1217                               dev->name, ret);
1218                }
1219        }
1220
1221        return err;
1222}
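/*
 * Example (illustrative sketch): renaming an interface from kernel code,
 * under rtnl_lock() and while the device is down, as required above.
 *
 *      rtnl_lock();
 *      err = dev_change_name(dev, "lan%d");
 *      rtnl_unlock();
 */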
1223
1224/**
1225 *      dev_set_alias - change ifalias of a device
1226 *      @dev: device
1227 *      @alias: name up to IFALIASZ
1228 *      @len: limit of bytes to copy from info
1229 *
 1230 *      Set the ifalias for a device.
1231 */
1232int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1233{
1234        char *new_ifalias;
1235
1236        ASSERT_RTNL();
1237
1238        if (len >= IFALIASZ)
1239                return -EINVAL;
1240
1241        if (!len) {
1242                kfree(dev->ifalias);
1243                dev->ifalias = NULL;
1244                return 0;
1245        }
1246
1247        new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1248        if (!new_ifalias)
1249                return -ENOMEM;
1250        dev->ifalias = new_ifalias;
1251
1252        strlcpy(dev->ifalias, alias, len+1);
1253        return len;
1254}
1255
1256
1257/**
1258 *      netdev_features_change - device changes features
1259 *      @dev: device to cause notification
1260 *
1261 *      Called to indicate a device has changed features.
1262 */
1263void netdev_features_change(struct net_device *dev)
1264{
1265        call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1266}
1267EXPORT_SYMBOL(netdev_features_change);
1268
1269/**
1270 *      netdev_state_change - device changes state
1271 *      @dev: device to cause notification
1272 *
1273 *      Called to indicate a device has changed state. This function calls
1274 *      the notifier chains for netdev_chain and sends a NEWLINK message
1275 *      to the routing socket.
1276 */
1277void netdev_state_change(struct net_device *dev)
1278{
1279        if (dev->flags & IFF_UP) {
1280                struct netdev_notifier_change_info change_info;
1281
1282                change_info.flags_changed = 0;
1283                call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1284                                              &change_info.info);
1285                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1286        }
1287}
1288EXPORT_SYMBOL(netdev_state_change);
1289
1290/**
1291 *      netdev_notify_peers - notify network peers about existence of @dev
1292 *      @dev: network device
1293 *
1294 * Generate traffic such that interested network peers are aware of
1295 * @dev, such as by generating a gratuitous ARP. This may be used when
1296 * a device wants to inform the rest of the network about some sort of
1297 * reconfiguration such as a failover event or virtual machine
1298 * migration.
1299 */
1300void netdev_notify_peers(struct net_device *dev)
1301{
1302        rtnl_lock();
1303        call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1304        rtnl_unlock();
1305}
1306EXPORT_SYMBOL(netdev_notify_peers);
1307
1308static int __dev_open(struct net_device *dev)
1309{
1310        const struct net_device_ops *ops = dev->netdev_ops;
1311        int ret;
1312
1313        ASSERT_RTNL();
1314
1315        if (!netif_device_present(dev))
1316                return -ENODEV;
1317
1318        /* Block netpoll from trying to do any rx path servicing.
 1319         * If we don't do this, there is a chance ndo_poll_controller
 1320         * or ndo_poll may be running while we open the device.
1321         */
1322        netpoll_poll_disable(dev);
1323
1324        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1325        ret = notifier_to_errno(ret);
1326        if (ret)
1327                return ret;
1328
1329        set_bit(__LINK_STATE_START, &dev->state);
1330
1331        if (ops->ndo_validate_addr)
1332                ret = ops->ndo_validate_addr(dev);
1333
1334        if (!ret && ops->ndo_open)
1335                ret = ops->ndo_open(dev);
1336
1337        netpoll_poll_enable(dev);
1338
1339        if (ret)
1340                clear_bit(__LINK_STATE_START, &dev->state);
1341        else {
1342                dev->flags |= IFF_UP;
1343                dev_set_rx_mode(dev);
1344                dev_activate(dev);
1345                add_device_randomness(dev->dev_addr, dev->addr_len);
1346        }
1347
1348        return ret;
1349}
1350
1351/**
1352 *      dev_open        - prepare an interface for use.
1353 *      @dev:   device to open
1354 *
1355 *      Takes a device from down to up state. The device's private open
1356 *      function is invoked and then the multicast lists are loaded. Finally
1357 *      the device is moved into the up state and a %NETDEV_UP message is
1358 *      sent to the netdev notifier chain.
1359 *
1360 *      Calling this function on an active interface is a nop. On a failure
1361 *      a negative errno code is returned.
1362 */
1363int dev_open(struct net_device *dev)
1364{
1365        int ret;
1366
1367        if (dev->flags & IFF_UP)
1368                return 0;
1369
1370        ret = __dev_open(dev);
1371        if (ret < 0)
1372                return ret;
1373
1374        rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1375        call_netdevice_notifiers(NETDEV_UP, dev);
1376
1377        return ret;
1378}
1379EXPORT_SYMBOL(dev_open);
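/*
 * Example (illustrative sketch): bringing an interface up from kernel
 * code. dev_open() must run under the rtnl semaphore, since __dev_open()
 * asserts RTNL; the matching shutdown path is dev_close().
 *
 *      rtnl_lock();
 *      err = dev_open(dev);
 *      rtnl_unlock();
 *      if (err)
 *              return err;
 */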
1380
1381static int __dev_close_many(struct list_head *head)
1382{
1383        struct net_device *dev;
1384
1385        ASSERT_RTNL();
1386        might_sleep();
1387
1388        list_for_each_entry(dev, head, close_list) {
1389                /* Temporarily disable netpoll until the interface is down */
1390                netpoll_poll_disable(dev);
1391
1392                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1393
1394                clear_bit(__LINK_STATE_START, &dev->state);
1395
 1396                 /* Synchronize to the scheduled poll. We cannot touch the poll
 1397                  * list; it can even be on a different cpu. So just clear netif_running().
 1398                  *
 1399                  * dev->stop() will invoke napi_disable() on all of its
 1400                  * napi_struct instances on this device.
1401                 */
1402                smp_mb__after_atomic(); /* Commit netif_running(). */
1403        }
1404
1405        dev_deactivate_many(head);
1406
1407        list_for_each_entry(dev, head, close_list) {
1408                const struct net_device_ops *ops = dev->netdev_ops;
1409
1410                /*
 1411                 *      Call the device-specific close. This cannot fail and
 1412                 *      is only done if the device is UP.
1413                 *
1414                 *      We allow it to be called even after a DETACH hot-plug
1415                 *      event.
1416                 */
1417                if (ops->ndo_stop)
1418                        ops->ndo_stop(dev);
1419
1420                dev->flags &= ~IFF_UP;
1421                netpoll_poll_enable(dev);
1422        }
1423
1424        return 0;
1425}
1426
1427static int __dev_close(struct net_device *dev)
1428{
1429        int retval;
1430        LIST_HEAD(single);
1431
1432        list_add(&dev->close_list, &single);
1433        retval = __dev_close_many(&single);
1434        list_del(&single);
1435
1436        return retval;
1437}
1438
1439int dev_close_many(struct list_head *head, bool unlink)
1440{
1441        struct net_device *dev, *tmp;
1442
1443        /* Remove the devices that don't need to be closed */
1444        list_for_each_entry_safe(dev, tmp, head, close_list)
1445                if (!(dev->flags & IFF_UP))
1446                        list_del_init(&dev->close_list);
1447
1448        __dev_close_many(head);
1449
1450        list_for_each_entry_safe(dev, tmp, head, close_list) {
1451                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1452                call_netdevice_notifiers(NETDEV_DOWN, dev);
1453                if (unlink)
1454                        list_del_init(&dev->close_list);
1455        }
1456
1457        return 0;
1458}
1459EXPORT_SYMBOL(dev_close_many);
1460
1461/**
1462 *      dev_close - shutdown an interface.
1463 *      @dev: device to shutdown
1464 *
1465 *      This function moves an active device into down state. A
1466 *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1467 *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1468 *      chain.
1469 */
1470int dev_close(struct net_device *dev)
1471{
1472        if (dev->flags & IFF_UP) {
1473                LIST_HEAD(single);
1474
1475                list_add(&dev->close_list, &single);
1476                dev_close_many(&single, true);
1477                list_del(&single);
1478        }
1479        return 0;
1480}
1481EXPORT_SYMBOL(dev_close);
1482
1483
1484/**
1485 *      dev_disable_lro - disable Large Receive Offload on a device
1486 *      @dev: device
1487 *
1488 *      Disable Large Receive Offload (LRO) on a net device.  Must be
1489 *      called under RTNL.  This is needed if received packets may be
1490 *      forwarded to another interface.
1491 */
1492void dev_disable_lro(struct net_device *dev)
1493{
1494        struct net_device *lower_dev;
1495        struct list_head *iter;
1496
1497        dev->wanted_features &= ~NETIF_F_LRO;
1498        netdev_update_features(dev);
1499
1500        if (unlikely(dev->features & NETIF_F_LRO))
1501                netdev_WARN(dev, "failed to disable LRO!\n");
1502
1503        netdev_for_each_lower_dev(dev, lower_dev, iter)
1504                dev_disable_lro(lower_dev);
1505}
1506EXPORT_SYMBOL(dev_disable_lro);
1507
1508static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1509                                   struct net_device *dev)
1510{
1511        struct netdev_notifier_info info;
1512
1513        netdev_notifier_info_init(&info, dev);
1514        return nb->notifier_call(nb, val, &info);
1515}
1516
1517static int dev_boot_phase = 1;
1518
1519/**
1520 *      register_netdevice_notifier - register a network notifier block
1521 *      @nb: notifier
1522 *
1523 *      Register a notifier to be called when network device events occur.
1524 *      The notifier passed is linked into the kernel structures and must
1525 *      not be reused until it has been unregistered. A negative errno code
1526 *      is returned on a failure.
1527 *
 1528 *      When registered, all registration and up events are replayed
 1529 *      to the new notifier to allow the notifier to have a race-free
 1530 *      view of the network device list.
1531 */
1532
1533int register_netdevice_notifier(struct notifier_block *nb)
1534{
1535        struct net_device *dev;
1536        struct net_device *last;
1537        struct net *net;
1538        int err;
1539
1540        rtnl_lock();
1541        err = raw_notifier_chain_register(&netdev_chain, nb);
1542        if (err)
1543                goto unlock;
1544        if (dev_boot_phase)
1545                goto unlock;
1546        for_each_net(net) {
1547                for_each_netdev(net, dev) {
1548                        err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1549                        err = notifier_to_errno(err);
1550                        if (err)
1551                                goto rollback;
1552
1553                        if (!(dev->flags & IFF_UP))
1554                                continue;
1555
1556                        call_netdevice_notifier(nb, NETDEV_UP, dev);
1557                }
1558        }
1559
1560unlock:
1561        rtnl_unlock();
1562        return err;
1563
1564rollback:
1565        last = dev;
1566        for_each_net(net) {
1567                for_each_netdev(net, dev) {
1568                        if (dev == last)
1569                                goto outroll;
1570
1571                        if (dev->flags & IFF_UP) {
1572                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1573                                                        dev);
1574                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1575                        }
1576                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1577                }
1578        }
1579
1580outroll:
1581        raw_notifier_chain_unregister(&netdev_chain, nb);
1582        goto unlock;
1583}
1584EXPORT_SYMBOL(register_netdevice_notifier);
1585
1586/**
1587 *      unregister_netdevice_notifier - unregister a network notifier block
1588 *      @nb: notifier
1589 *
1590 *      Unregister a notifier previously registered by
1591 *      register_netdevice_notifier(). The notifier is unlinked from the
1592 *      kernel structures and may then be reused. A negative errno code
1593 *      is returned on a failure.
1594 *
1595 *      After unregistering, unregister and down device events are synthesized
1596 *      for all devices on the device list and delivered to the removed notifier,
1597 *      removing the need for special-case cleanup code.
1598 */
1599
1600int unregister_netdevice_notifier(struct notifier_block *nb)
1601{
1602        struct net_device *dev;
1603        struct net *net;
1604        int err;
1605
1606        rtnl_lock();
1607        err = raw_notifier_chain_unregister(&netdev_chain, nb);
1608        if (err)
1609                goto unlock;
1610
1611        for_each_net(net) {
1612                for_each_netdev(net, dev) {
1613                        if (dev->flags & IFF_UP) {
1614                                call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1615                                                        dev);
1616                                call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1617                        }
1618                        call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1619                }
1620        }
1621unlock:
1622        rtnl_unlock();
1623        return err;
1624}
1625EXPORT_SYMBOL(unregister_netdevice_notifier);
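/* Example (illustrative sketch, hypothetical module): a notifier that logs
 * devices coming up.  Registration replays NETDEV_REGISTER/NETDEV_UP for
 * existing devices; unregistration synthesizes the matching teardown events.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	// module init/exit:
 *	register_netdevice_notifier(&example_nb);
 *	unregister_netdevice_notifier(&example_nb);
 */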
1626
1627/**
1628 *      call_netdevice_notifiers_info - call all network notifier blocks
1629 *      @val: value passed unmodified to notifier function
1630 *      @dev: net_device pointer passed unmodified to notifier function
1631 *      @info: notifier information data
1632 *
1633 *      Call all network notifier blocks.  Parameters and return value
1634 *      are as for raw_notifier_call_chain().
1635 */
1636
1637static int call_netdevice_notifiers_info(unsigned long val,
1638                                         struct net_device *dev,
1639                                         struct netdev_notifier_info *info)
1640{
1641        ASSERT_RTNL();
1642        netdev_notifier_info_init(info, dev);
1643        return raw_notifier_call_chain(&netdev_chain, val, info);
1644}
1645
1646/**
1647 *      call_netdevice_notifiers - call all network notifier blocks
1648 *      @val: value passed unmodified to notifier function
1649 *      @dev: net_device pointer passed unmodified to notifier function
1650 *
1651 *      Call all network notifier blocks.  Parameters and return value
1652 *      are as for raw_notifier_call_chain().
1653 */
1654
1655int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1656{
1657        struct netdev_notifier_info info;
1658
1659        return call_netdevice_notifiers_info(val, dev, &info);
1660}
1661EXPORT_SYMBOL(call_netdevice_notifiers);
1662
1663#ifdef CONFIG_NET_INGRESS
1664static struct static_key ingress_needed __read_mostly;
1665
1666void net_inc_ingress_queue(void)
1667{
1668        static_key_slow_inc(&ingress_needed);
1669}
1670EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1671
1672void net_dec_ingress_queue(void)
1673{
1674        static_key_slow_dec(&ingress_needed);
1675}
1676EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1677#endif
1678
1679#ifdef CONFIG_NET_EGRESS
1680static struct static_key egress_needed __read_mostly;
1681
1682void net_inc_egress_queue(void)
1683{
1684        static_key_slow_inc(&egress_needed);
1685}
1686EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1687
1688void net_dec_egress_queue(void)
1689{
1690        static_key_slow_dec(&egress_needed);
1691}
1692EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1693#endif
1694
1695static struct static_key netstamp_needed __read_mostly;
1696#ifdef HAVE_JUMP_LABEL
1697/* We are not allowed to call static_key_slow_dec() from irq context.
1698 * If net_disable_timestamp() is called from irq context, defer the
1699 * static_key_slow_dec() calls.
1700 */
1701static atomic_t netstamp_needed_deferred;
1702#endif
1703
1704void net_enable_timestamp(void)
1705{
1706#ifdef HAVE_JUMP_LABEL
1707        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1708
1709        if (deferred) {
1710                while (--deferred)
1711                        static_key_slow_dec(&netstamp_needed);
1712                return;
1713        }
1714#endif
1715        static_key_slow_inc(&netstamp_needed);
1716}
1717EXPORT_SYMBOL(net_enable_timestamp);
1718
1719void net_disable_timestamp(void)
1720{
1721#ifdef HAVE_JUMP_LABEL
1722        if (in_interrupt()) {
1723                atomic_inc(&netstamp_needed_deferred);
1724                return;
1725        }
1726#endif
1727        static_key_slow_dec(&netstamp_needed);
1728}
1729EXPORT_SYMBOL(net_disable_timestamp);
1730
1731static inline void net_timestamp_set(struct sk_buff *skb)
1732{
1733        skb->tstamp.tv64 = 0;
1734        if (static_key_false(&netstamp_needed))
1735                __net_timestamp(skb);
1736}
1737
1738#define net_timestamp_check(COND, SKB)                  \
1739        if (static_key_false(&netstamp_needed)) {               \
1740                if ((COND) && !(SKB)->tstamp.tv64)      \
1741                        __net_timestamp(SKB);           \
1742        }                                               \
1743
1744bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1745{
1746        unsigned int len;
1747
1748        if (!(dev->flags & IFF_UP))
1749                return false;
1750
1751        len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1752        if (skb->len <= len)
1753                return true;
1754
1755        /* if TSO is enabled, we don't care about the length as the packet
1756         * could be forwarded without being segmented beforehand
1757         */
1758        if (skb_is_gso(skb))
1759                return true;
1760
1761        return false;
1762}
1763EXPORT_SYMBOL_GPL(is_skb_forwardable);
1764
1765int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1766{
1767        if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1768            unlikely(!is_skb_forwardable(dev, skb))) {
1769                atomic_long_inc(&dev->rx_dropped);
1770                kfree_skb(skb);
1771                return NET_RX_DROP;
1772        }
1773
1774        skb_scrub_packet(skb, true);
1775        skb->priority = 0;
1776        skb->protocol = eth_type_trans(skb, dev);
1777        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1778
1779        return 0;
1780}
1781EXPORT_SYMBOL_GPL(__dev_forward_skb);
1782
1783/**
1784 * dev_forward_skb - loopback an skb to another netif
1785 *
1786 * @dev: destination network device
1787 * @skb: buffer to forward
1788 *
1789 * return values:
1790 *      NET_RX_SUCCESS  (no congestion)
1791 *      NET_RX_DROP     (packet was dropped, but freed)
1792 *
1793 * dev_forward_skb can be used for injecting an skb from the
1794 * start_xmit function of one device into the receive queue
1795 * of another device.
1796 *
1797 * The receiving device may be in another namespace, so
1798 * we have to clear all information in the skb that could
1799 * impact namespace isolation.
1800 */
1801int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1802{
1803        return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1804}
1805EXPORT_SYMBOL_GPL(dev_forward_skb);
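/* Example (illustrative sketch, modeled loosely on veth-style drivers,
 * hypothetical names): handing a transmitted skb to a paired device's
 * receive path from ndo_start_xmit.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev); // driver specific
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;	// skb already freed
 *		return NETDEV_TX_OK;
 *	}
 */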
1806
1807static inline int deliver_skb(struct sk_buff *skb,
1808                              struct packet_type *pt_prev,
1809                              struct net_device *orig_dev)
1810{
1811        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1812                return -ENOMEM;
1813        atomic_inc(&skb->users);
1814        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1815}
1816
1817static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1818                                          struct packet_type **pt,
1819                                          struct net_device *orig_dev,
1820                                          __be16 type,
1821                                          struct list_head *ptype_list)
1822{
1823        struct packet_type *ptype, *pt_prev = *pt;
1824
1825        list_for_each_entry_rcu(ptype, ptype_list, list) {
1826                if (ptype->type != type)
1827                        continue;
1828                if (pt_prev)
1829                        deliver_skb(skb, pt_prev, orig_dev);
1830                pt_prev = ptype;
1831        }
1832        *pt = pt_prev;
1833}
1834
1835static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1836{
1837        if (!ptype->af_packet_priv || !skb->sk)
1838                return false;
1839
1840        if (ptype->id_match)
1841                return ptype->id_match(ptype, skb->sk);
1842        else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1843                return true;
1844
1845        return false;
1846}
1847
1848/*
1849 *      Support routine. Sends outgoing frames to any network
1850 *      taps currently in use.
1851 */
1852
1853static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1854{
1855        struct packet_type *ptype;
1856        struct sk_buff *skb2 = NULL;
1857        struct packet_type *pt_prev = NULL;
1858        struct list_head *ptype_list = &ptype_all;
1859
1860        rcu_read_lock();
1861again:
1862        list_for_each_entry_rcu(ptype, ptype_list, list) {
1863                /* Never send packets back to the socket
1864                 * they originated from - MvS (miquels@drinkel.ow.org)
1865                 */
1866                if (skb_loop_sk(ptype, skb))
1867                        continue;
1868
1869                if (pt_prev) {
1870                        deliver_skb(skb2, pt_prev, skb->dev);
1871                        pt_prev = ptype;
1872                        continue;
1873                }
1874
1875                /* need to clone skb, done only once */
1876                skb2 = skb_clone(skb, GFP_ATOMIC);
1877                if (!skb2)
1878                        goto out_unlock;
1879
1880                net_timestamp_set(skb2);
1881
1882                /* skb->nh should be correctly
1883                 * set by sender, so that the second statement is
1884                 * just protection against buggy protocols.
1885                 */
1886                skb_reset_mac_header(skb2);
1887
1888                if (skb_network_header(skb2) < skb2->data ||
1889                    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1890                        net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1891                                             ntohs(skb2->protocol),
1892                                             dev->name);
1893                        skb_reset_network_header(skb2);
1894                }
1895
1896                skb2->transport_header = skb2->network_header;
1897                skb2->pkt_type = PACKET_OUTGOING;
1898                pt_prev = ptype;
1899        }
1900
1901        if (ptype_list == &ptype_all) {
1902                ptype_list = &dev->ptype_all;
1903                goto again;
1904        }
1905out_unlock:
1906        if (pt_prev)
1907                pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1908        rcu_read_unlock();
1909}
1910
1911/**
1912 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1913 * @dev: Network device
1914 * @txq: number of queues available
1915 *
1916 * If real_num_tx_queues is changed the tc mappings may no longer be
1917 * valid. To resolve this verify the tc mapping remains valid and if
1918 * not, reset the mapping to TC0. With no priorities mapping to an
1919 * offset/count pair, that pair will no longer be used. In the worst
1920 * case, if TC0 itself is invalid, nothing can be done, so priority
1921 * mappings are disabled. It is expected that drivers will fix this
1922 * mapping if they can before calling netif_set_real_num_tx_queues.
1923 */
1924static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1925{
1926        int i;
1927        struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1928
1929        /* If TC0 is invalidated disable TC mapping */
1930        if (tc->offset + tc->count > txq) {
1931                pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1932                dev->num_tc = 0;
1933                return;
1934        }
1935
1936        /* Invalidated prio to tc mappings set to TC0 */
1937        for (i = 1; i < TC_BITMASK + 1; i++) {
1938                int q = netdev_get_prio_tc_map(dev, i);
1939
1940                tc = &dev->tc_to_txq[q];
1941                if (tc->offset + tc->count > txq) {
1942                        pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1943                                i, q);
1944                        netdev_set_prio_tc_map(dev, i, 0);
1945                }
1946        }
1947}
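/* Example (illustrative sketch, hypothetical driver): setting up a 2-TC
 * mapping over 8 queues before changing real_num_tx_queues, so that
 * netif_setup_tc() finds nothing to repair.
 *
 *	int i;
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC1: queues 4-7
 *	for (i = 0; i < TC_BITMASK + 1; i++)
 *		netdev_set_prio_tc_map(dev, i, i & 1);
 *	netif_set_real_num_tx_queues(dev, 8);
 */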
1948
1949#ifdef CONFIG_XPS
1950static DEFINE_MUTEX(xps_map_mutex);
1951#define xmap_dereference(P)             \
1952        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1953
1954static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1955                                        int cpu, u16 index)
1956{
1957        struct xps_map *map = NULL;
1958        int pos;
1959
1960        if (dev_maps)
1961                map = xmap_dereference(dev_maps->cpu_map[cpu]);
1962
1963        for (pos = 0; map && pos < map->len; pos++) {
1964                if (map->queues[pos] == index) {
1965                        if (map->len > 1) {
1966                                map->queues[pos] = map->queues[--map->len];
1967                        } else {
1968                                RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1969                                kfree_rcu(map, rcu);
1970                                map = NULL;
1971                        }
1972                        break;
1973                }
1974        }
1975
1976        return map;
1977}
1978
1979static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1980{
1981        struct xps_dev_maps *dev_maps;
1982        int cpu, i;
1983        bool active = false;
1984
1985        mutex_lock(&xps_map_mutex);
1986        dev_maps = xmap_dereference(dev->xps_maps);
1987
1988        if (!dev_maps)
1989                goto out_no_maps;
1990
1991        for_each_possible_cpu(cpu) {
1992                for (i = index; i < dev->num_tx_queues; i++) {
1993                        if (!remove_xps_queue(dev_maps, cpu, i))
1994                                break;
1995                }
1996                if (i == dev->num_tx_queues)
1997                        active = true;
1998        }
1999
2000        if (!active) {
2001                RCU_INIT_POINTER(dev->xps_maps, NULL);
2002                kfree_rcu(dev_maps, rcu);
2003        }
2004
2005        for (i = index; i < dev->num_tx_queues; i++)
2006                netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2007                                             NUMA_NO_NODE);
2008
2009out_no_maps:
2010        mutex_unlock(&xps_map_mutex);
2011}
2012
2013static struct xps_map *expand_xps_map(struct xps_map *map,
2014                                      int cpu, u16 index)
2015{
2016        struct xps_map *new_map;
2017        int alloc_len = XPS_MIN_MAP_ALLOC;
2018        int i, pos;
2019
2020        for (pos = 0; map && pos < map->len; pos++) {
2021                if (map->queues[pos] != index)
2022                        continue;
2023                return map;
2024        }
2025
2026        /* Need to add queue to this CPU's existing map */
2027        if (map) {
2028                if (pos < map->alloc_len)
2029                        return map;
2030
2031                alloc_len = map->alloc_len * 2;
2032        }
2033
2034        /* Need to allocate a new map to store the queue in this CPU's map */
2035        new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2036                               cpu_to_node(cpu));
2037        if (!new_map)
2038                return NULL;
2039
2040        for (i = 0; i < pos; i++)
2041                new_map->queues[i] = map->queues[i];
2042        new_map->alloc_len = alloc_len;
2043        new_map->len = pos;
2044
2045        return new_map;
2046}
2047
2048int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2049                        u16 index)
2050{
2051        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2052        struct xps_map *map, *new_map;
2053        int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2054        int cpu, numa_node_id = -2;
2055        bool active = false;
2056
2057        mutex_lock(&xps_map_mutex);
2058
2059        dev_maps = xmap_dereference(dev->xps_maps);
2060
2061        /* allocate memory for queue storage */
2062        for_each_online_cpu(cpu) {
2063                if (!cpumask_test_cpu(cpu, mask))
2064                        continue;
2065
2066                if (!new_dev_maps)
2067                        new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2068                if (!new_dev_maps) {
2069                        mutex_unlock(&xps_map_mutex);
2070                        return -ENOMEM;
2071                }
2072
2073                map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2074                                 NULL;
2075
2076                map = expand_xps_map(map, cpu, index);
2077                if (!map)
2078                        goto error;
2079
2080                RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2081        }
2082
2083        if (!new_dev_maps)
2084                goto out_no_new_maps;
2085
2086        for_each_possible_cpu(cpu) {
2087                if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2088                        /* add queue to CPU maps */
2089                        int pos = 0;
2090
2091                        map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2092                        while ((pos < map->len) && (map->queues[pos] != index))
2093                                pos++;
2094
2095                        if (pos == map->len)
2096                                map->queues[map->len++] = index;
2097#ifdef CONFIG_NUMA
2098                        if (numa_node_id == -2)
2099                                numa_node_id = cpu_to_node(cpu);
2100                        else if (numa_node_id != cpu_to_node(cpu))
2101                                numa_node_id = -1;
2102#endif
2103                } else if (dev_maps) {
2104                        /* fill in the new device map from the old device map */
2105                        map = xmap_dereference(dev_maps->cpu_map[cpu]);
2106                        RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2107                }
2108
2109        }
2110
2111        rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2112
2113        /* Cleanup old maps */
2114        if (dev_maps) {
2115                for_each_possible_cpu(cpu) {
2116                        new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2117                        map = xmap_dereference(dev_maps->cpu_map[cpu]);
2118                        if (map && map != new_map)
2119                                kfree_rcu(map, rcu);
2120                }
2121
2122                kfree_rcu(dev_maps, rcu);
2123        }
2124
2125        dev_maps = new_dev_maps;
2126        active = true;
2127
2128out_no_new_maps:
2129        /* update Tx queue numa node */
2130        netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2131                                     (numa_node_id >= 0) ? numa_node_id :
2132                                     NUMA_NO_NODE);
2133
2134        if (!dev_maps)
2135                goto out_no_maps;
2136
2137        /* removes queue from unused CPUs */
2138        for_each_possible_cpu(cpu) {
2139                if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2140                        continue;
2141
2142                if (remove_xps_queue(dev_maps, cpu, index))
2143                        active = true;
2144        }
2145
2146        /* free map if not active */
2147        if (!active) {
2148                RCU_INIT_POINTER(dev->xps_maps, NULL);
2149                kfree_rcu(dev_maps, rcu);
2150        }
2151
2152out_no_maps:
2153        mutex_unlock(&xps_map_mutex);
2154
2155        return 0;
2156error:
2157        /* remove any maps that we added */
2158        for_each_possible_cpu(cpu) {
2159                new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2160                map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2161                                 NULL;
2162                if (new_map && new_map != map)
2163                        kfree(new_map);
2164        }
2165
2166        mutex_unlock(&xps_map_mutex);
2167
2168        kfree(new_dev_maps);
2169        return -ENOMEM;
2170}
2171EXPORT_SYMBOL(netif_set_xps_queue);
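/* Example (illustrative sketch, hypothetical driver): spreading TX queues
 * across online CPUs, typically done once queues and IRQs are set up.
 *
 *	cpumask_var_t mask;
 *	int i;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		for (i = 0; i < dev->real_num_tx_queues; i++) {
 *			cpumask_clear(mask);
 *			cpumask_set_cpu(i % num_online_cpus(), mask);
 *			netif_set_xps_queue(dev, mask, i);
 *		}
 *		free_cpumask_var(mask);
 *	}
 */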
2172
2173#endif
2174/*
2175 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2176 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2177 */
2178int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2179{
2180        int rc;
2181
2182        if (txq < 1 || txq > dev->num_tx_queues)
2183                return -EINVAL;
2184
2185        if (dev->reg_state == NETREG_REGISTERED ||
2186            dev->reg_state == NETREG_UNREGISTERING) {
2187                ASSERT_RTNL();
2188
2189                rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2190                                                  txq);
2191                if (rc)
2192                        return rc;
2193
2194                if (dev->num_tc)
2195                        netif_setup_tc(dev, txq);
2196
2197                if (txq < dev->real_num_tx_queues) {
2198                        qdisc_reset_all_tx_gt(dev, txq);
2199#ifdef CONFIG_XPS
2200                        netif_reset_xps_queues_gt(dev, txq);
2201#endif
2202                }
2203        }
2204
2205        dev->real_num_tx_queues = txq;
2206        return 0;
2207}
2208EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2209
2210#ifdef CONFIG_SYSFS
2211/**
2212 *      netif_set_real_num_rx_queues - set actual number of RX queues used
2213 *      @dev: Network device
2214 *      @rxq: Actual number of RX queues
2215 *
2216 *      This must be called either with the rtnl_lock held or before
2217 *      registration of the net device.  Returns 0 on success, or a
2218 *      negative error code.  If called before registration, it always
2219 *      succeeds.
2220 */
2221int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2222{
2223        int rc;
2224
2225        if (rxq < 1 || rxq > dev->num_rx_queues)
2226                return -EINVAL;
2227
2228        if (dev->reg_state == NETREG_REGISTERED) {
2229                ASSERT_RTNL();
2230
2231                rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2232                                                  rxq);
2233                if (rc)
2234                        return rc;
2235        }
2236
2237        dev->real_num_rx_queues = rxq;
2238        return 0;
2239}
2240EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2241#endif
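/* Example (illustrative sketch, hypothetical driver): resizing the active
 * queue set at runtime (e.g. from an ethtool channel change).  Both helpers
 * need the RTNL once the device is registered.
 *
 *	int err;
 *
 *	ASSERT_RTNL();
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, new_rxq);
 */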
2242
2243/**
2244 * netif_get_num_default_rss_queues - default number of RSS queues
2245 *
2246 * This routine should set an upper limit on the number of RSS queues
2247 * used by default by multiqueue devices.
2248 */
2249int netif_get_num_default_rss_queues(void)
2250{
2251        return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2252}
2253EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2254
2255static inline void __netif_reschedule(struct Qdisc *q)
2256{
2257        struct softnet_data *sd;
2258        unsigned long flags;
2259
2260        local_irq_save(flags);
2261        sd = this_cpu_ptr(&softnet_data);
2262        q->next_sched = NULL;
2263        *sd->output_queue_tailp = q;
2264        sd->output_queue_tailp = &q->next_sched;
2265        raise_softirq_irqoff(NET_TX_SOFTIRQ);
2266        local_irq_restore(flags);
2267}
2268
2269void __netif_schedule(struct Qdisc *q)
2270{
2271        if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2272                __netif_reschedule(q);
2273}
2274EXPORT_SYMBOL(__netif_schedule);
2275
2276struct dev_kfree_skb_cb {
2277        enum skb_free_reason reason;
2278};
2279
2280static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2281{
2282        return (struct dev_kfree_skb_cb *)skb->cb;
2283}
2284
2285void netif_schedule_queue(struct netdev_queue *txq)
2286{
2287        rcu_read_lock();
2288        if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2289                struct Qdisc *q = rcu_dereference(txq->qdisc);
2290
2291                __netif_schedule(q);
2292        }
2293        rcu_read_unlock();
2294}
2295EXPORT_SYMBOL(netif_schedule_queue);
2296
2297/**
2298 *      netif_wake_subqueue - allow sending packets on subqueue
2299 *      @dev: network device
2300 *      @queue_index: sub queue index
2301 *
2302 * Resume individual transmit queue of a device with multiple transmit queues.
2303 */
2304void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2305{
2306        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2307
2308        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2309                struct Qdisc *q;
2310
2311                rcu_read_lock();
2312                q = rcu_dereference(txq->qdisc);
2313                __netif_schedule(q);
2314                rcu_read_unlock();
2315        }
2316}
2317EXPORT_SYMBOL(netif_wake_subqueue);
2318
2319void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2320{
2321        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2322                struct Qdisc *q;
2323
2324                rcu_read_lock();
2325                q = rcu_dereference(dev_queue->qdisc);
2326                __netif_schedule(q);
2327                rcu_read_unlock();
2328        }
2329}
2330EXPORT_SYMBOL(netif_tx_wake_queue);
2331
2332void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2333{
2334        unsigned long flags;
2335
2336        if (likely(atomic_read(&skb->users) == 1)) {
2337                smp_rmb();
2338                atomic_set(&skb->users, 0);
2339        } else if (likely(!atomic_dec_and_test(&skb->users))) {
2340                return;
2341        }
2342        get_kfree_skb_cb(skb)->reason = reason;
2343        local_irq_save(flags);
2344        skb->next = __this_cpu_read(softnet_data.completion_queue);
2345        __this_cpu_write(softnet_data.completion_queue, skb);
2346        raise_softirq_irqoff(NET_TX_SOFTIRQ);
2347        local_irq_restore(flags);
2348}
2349EXPORT_SYMBOL(__dev_kfree_skb_irq);
2350
2351void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2352{
2353        if (in_irq() || irqs_disabled())
2354                __dev_kfree_skb_irq(skb, reason);
2355        else
2356                dev_kfree_skb(skb);
2357}
2358EXPORT_SYMBOL(__dev_kfree_skb_any);
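/* Example (illustrative sketch, hypothetical driver): a TX completion
 * handler that may run in hard-IRQ context.  dev_consume_skb_any() and
 * dev_kfree_skb_any() both end up in __dev_kfree_skb_any() with the
 * matching reason, keeping drop accounting meaningful.
 *
 *	if (tx_status_ok)
 *		dev_consume_skb_any(skb);	// successful transmit
 *	else
 *		dev_kfree_skb_any(skb);		// counted as a drop
 */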
2359
2360
2361/**
2362 * netif_device_detach - mark device as removed
2363 * @dev: network device
2364 *
2365 * Mark device as removed from the system and therefore no longer available.
2366 */
2367void netif_device_detach(struct net_device *dev)
2368{
2369        if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2370            netif_running(dev)) {
2371                netif_tx_stop_all_queues(dev);
2372        }
2373}
2374EXPORT_SYMBOL(netif_device_detach);
2375
2376/**
2377 * netif_device_attach - mark device as attached
2378 * @dev: network device
2379 *
2380 * Mark device as attached to the system and restart if needed.
2381 */
2382void netif_device_attach(struct net_device *dev)
2383{
2384        if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2385            netif_running(dev)) {
2386                netif_tx_wake_all_queues(dev);
2387                __netdev_watchdog_up(dev);
2388        }
2389}
2390EXPORT_SYMBOL(netif_device_attach);
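/* Example (illustrative sketch, hypothetical driver suspend/resume hooks):
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		netif_device_detach(netdev);	// stops all TX queues if running
 *		example_hw_quiesce(netdev);	// driver specific
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		example_hw_reinit(netdev);	// driver specific
 *		netif_device_attach(netdev);	// wakes queues, restarts watchdog
 *		return 0;
 *	}
 */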
2391
2392/*
2393 * Returns a Tx hash based on the given packet descriptor and the number of
2394 * Tx queues to be used as a distribution range.
2395 */
2396u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2397                  unsigned int num_tx_queues)
2398{
2399        u32 hash;
2400        u16 qoffset = 0;
2401        u16 qcount = num_tx_queues;
2402
2403        if (skb_rx_queue_recorded(skb)) {
2404                hash = skb_get_rx_queue(skb);
2405                while (unlikely(hash >= num_tx_queues))
2406                        hash -= num_tx_queues;
2407                return hash;
2408        }
2409
2410        if (dev->num_tc) {
2411                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2412                qoffset = dev->tc_to_txq[tc].offset;
2413                qcount = dev->tc_to_txq[tc].count;
2414        }
2415
2416        return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2417}
2418EXPORT_SYMBOL(__skb_tx_hash);
2419
2420static void skb_warn_bad_offload(const struct sk_buff *skb)
2421{
2422        static const netdev_features_t null_features = 0;
2423        struct net_device *dev = skb->dev;
2424        const char *name = "";
2425
2426        if (!net_ratelimit())
2427                return;
2428
2429        if (dev) {
2430                if (dev->dev.parent)
2431                        name = dev_driver_string(dev->dev.parent);
2432                else
2433                        name = netdev_name(dev);
2434        }
2435        WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2436             "gso_type=%d ip_summed=%d\n",
2437             name, dev ? &dev->features : &null_features,
2438             skb->sk ? &skb->sk->sk_route_caps : &null_features,
2439             skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2440             skb_shinfo(skb)->gso_type, skb->ip_summed);
2441}
2442
2443/*
2444 * Invalidate hardware checksum when packet is to be mangled, and
2445 * complete checksum manually on outgoing path.
2446 */
2447int skb_checksum_help(struct sk_buff *skb)
2448{
2449        __wsum csum;
2450        int ret = 0, offset;
2451
2452        if (skb->ip_summed == CHECKSUM_COMPLETE)
2453                goto out_set_summed;
2454
2455        if (unlikely(skb_shinfo(skb)->gso_size)) {
2456                skb_warn_bad_offload(skb);
2457                return -EINVAL;
2458        }
2459
2460        /* Before computing a checksum, we should make sure no frag could
2461         * be modified by an external entity: the checksum could be wrong.
2462         */
2463        if (skb_has_shared_frag(skb)) {
2464                ret = __skb_linearize(skb);
2465                if (ret)
2466                        goto out;
2467        }
2468
2469        offset = skb_checksum_start_offset(skb);
2470        BUG_ON(offset >= skb_headlen(skb));
2471        csum = skb_checksum(skb, offset, skb->len - offset, 0);
2472
2473        offset += skb->csum_offset;
2474        BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2475
2476        if (skb_cloned(skb) &&
2477            !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2478                ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479                if (ret)
2480                        goto out;
2481        }
2482
2483        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2484out_set_summed:
2485        skb->ip_summed = CHECKSUM_NONE;
2486out:
2487        return ret;
2488}
2489EXPORT_SYMBOL(skb_checksum_help);
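/* Example (illustrative sketch, hypothetical driver): completing the
 * checksum in software when this particular packet cannot be offloaded.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_offload_csum(skb)) {	// driver-specific test
 *		if (skb_checksum_help(skb))
 *			goto drop;
 *	}
 */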
2490
2491/* skb_csum_offload_check - Driver helper function to determine if a device
2492 * with limited checksum offload capabilities is able to offload the checksum
2493 * for a given packet.
2494 *
2495 * Arguments:
2496 *   skb - sk_buff for the packet in question
2497 *   spec - contains the description of what device can offload
2498 *   csum_encapped - returns true if the checksum being offloaded is
2499 *            encapsulated. That is, it is the checksum for the transport
2500 *            header in the inner headers.
2501 *   checksum_help - when set indicates that helper function should
2502 *            call skb_checksum_help if offload checks fail
2503 *
2504 * Returns:
2505 *   true: Packet has passed the checksum checks and should be offloadable to
2506 *         the device (a driver may still need to check for additional
2507 *         restrictions of its device)
2508 *   false: Checksum is not offloadable. If checksum_help was set then
2509 *         skb_checksum_help was called to resolve the checksum for non-GSO
2510 *         packets whose IP protocol is not SCTP
2511 */
2512bool __skb_csum_offload_chk(struct sk_buff *skb,
2513                            const struct skb_csum_offl_spec *spec,
2514                            bool *csum_encapped,
2515                            bool csum_help)
2516{
2517        struct iphdr *iph;
2518        struct ipv6hdr *ipv6;
2519        void *nhdr;
2520        int protocol;
2521        u8 ip_proto;
2522
2523        if (skb->protocol == htons(ETH_P_8021Q) ||
2524            skb->protocol == htons(ETH_P_8021AD)) {
2525                if (!spec->vlan_okay)
2526                        goto need_help;
2527        }
2528
2529        /* We check whether the checksum refers to a transport layer checksum in
2530         * the outermost header or an encapsulated transport layer checksum that
2531         * corresponds to the inner headers of the skb. If the checksum is for
2532         * something else in the packet we need help.
2533         */
2534        if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
2535                /* Non-encapsulated checksum */
2536                protocol = eproto_to_ipproto(vlan_get_protocol(skb));
2537                nhdr = skb_network_header(skb);
2538                *csum_encapped = false;
2539                if (spec->no_not_encapped)
2540                        goto need_help;
2541        } else if (skb->encapsulation && spec->encap_okay &&
2542                   skb_checksum_start_offset(skb) ==
2543                   skb_inner_transport_offset(skb)) {
2544                /* Encapsulated checksum */
2545                *csum_encapped = true;
2546                switch (skb->inner_protocol_type) {
2547                case ENCAP_TYPE_ETHER:
2548                        protocol = eproto_to_ipproto(skb->inner_protocol);
2549                        break;
2550                case ENCAP_TYPE_IPPROTO:
2551                        protocol = skb->inner_protocol;
2552                        break;
2553                }
2554                nhdr = skb_inner_network_header(skb);
2555        } else {
2556                goto need_help;
2557        }
2558
2559        switch (protocol) {
2560        case IPPROTO_IP:
2561                if (!spec->ipv4_okay)
2562                        goto need_help;
2563                iph = nhdr;
2564                ip_proto = iph->protocol;
2565                if (iph->ihl != 5 && !spec->ip_options_okay)
2566                        goto need_help;
2567                break;
2568        case IPPROTO_IPV6:
2569                if (!spec->ipv6_okay)
2570                        goto need_help;
2571                if (spec->no_encapped_ipv6 && *csum_encapped)
2572                        goto need_help;
2573                ipv6 = nhdr;
2574                nhdr += sizeof(*ipv6);
2575                ip_proto = ipv6->nexthdr;
2576                break;
2577        default:
2578                goto need_help;
2579        }
2580
2581ip_proto_again:
2582        switch (ip_proto) {
2583        case IPPROTO_TCP:
2584                if (!spec->tcp_okay ||
2585                    skb->csum_offset != offsetof(struct tcphdr, check))
2586                        goto need_help;
2587                break;
2588        case IPPROTO_UDP:
2589                if (!spec->udp_okay ||
2590                    skb->csum_offset != offsetof(struct udphdr, check))
2591                        goto need_help;
2592                break;
2593        case IPPROTO_SCTP:
2594                if (!spec->sctp_okay ||
2595                    skb->csum_offset != offsetof(struct sctphdr, checksum))
2596                        goto cant_help;
2597                break;
2598        case NEXTHDR_HOP:
2599        case NEXTHDR_ROUTING:
2600        case NEXTHDR_DEST: {
2601                u8 *opthdr = nhdr;
2602
2603                if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
2604                        goto need_help;
2605
2606                ip_proto = opthdr[0];
2607                nhdr += (opthdr[1] + 1) << 3;
2608
2609                goto ip_proto_again;
2610        }
2611        default:
2612                goto need_help;
2613        }
2614
2615        /* Passed the tests for offloading checksum */
2616        return true;
2617
2618need_help:
2619        if (csum_help && !skb_shinfo(skb)->gso_size)
2620                skb_checksum_help(skb);
2621cant_help:
2622        return false;
2623}
2624EXPORT_SYMBOL(__skb_csum_offload_chk);
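/* Example (illustrative sketch, hypothetical driver): a device that can only
 * checksum plain TCP/UDP over IPv4/IPv6 describes itself with a spec and
 * asks for software fallback when the packet does not fit.
 *
 *	static const struct skb_csum_offl_spec example_csum_spec = {
 *		.ipv4_okay = 1,
 *		.ipv6_okay = 1,
 *		.tcp_okay  = 1,
 *		.udp_okay  = 1,
 *	};
 *
 *	bool csum_encapped;
 *
 *	if (!__skb_csum_offload_chk(skb, &example_csum_spec,
 *				    &csum_encapped, true))
 *		return example_xmit_without_csum_offload(skb);
 */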
2625
2626__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2627{
2628        __be16 type = skb->protocol;
2629
2630        /* Tunnel gso handlers can set protocol to ethernet. */
2631        if (type == htons(ETH_P_TEB)) {
2632                struct ethhdr *eth;
2633
2634                if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2635                        return 0;
2636
2637                eth = (struct ethhdr *)skb_mac_header(skb);
2638                type = eth->h_proto;
2639        }
2640
2641        return __vlan_get_protocol(skb, type, depth);
2642}
2643
2644/**
2645 *      skb_mac_gso_segment - mac layer segmentation handler.
2646 *      @skb: buffer to segment
2647 *      @features: features for the output path (see dev->features)
2648 */
2649struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2650                                    netdev_features_t features)
2651{
2652        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2653        struct packet_offload *ptype;
2654        int vlan_depth = skb->mac_len;
2655        __be16 type = skb_network_protocol(skb, &vlan_depth);
2656
2657        if (unlikely(!type))
2658                return ERR_PTR(-EINVAL);
2659
2660        __skb_pull(skb, vlan_depth);
2661
2662        rcu_read_lock();
2663        list_for_each_entry_rcu(ptype, &offload_base, list) {
2664                if (ptype->type == type && ptype->callbacks.gso_segment) {
2665                        segs = ptype->callbacks.gso_segment(skb, features);
2666                        break;
2667                }
2668        }
2669        rcu_read_unlock();
2670
2671        __skb_push(skb, skb->data - skb_mac_header(skb));
2672
2673        return segs;
2674}
2675EXPORT_SYMBOL(skb_mac_gso_segment);
2676
2677
2678/* openvswitch calls this on rx path, so we need a different check.
2679 */
2680static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2681{
2682        if (tx_path)
2683                return skb->ip_summed != CHECKSUM_PARTIAL;
2684        else
2685                return skb->ip_summed == CHECKSUM_NONE;
2686}
2687
2688/**
2689 *      __skb_gso_segment - Perform segmentation on skb.
2690 *      @skb: buffer to segment
2691 *      @features: features for the output path (see dev->features)
2692 *      @tx_path: whether it is called in TX path
2693 *
2694 *      This function segments the given skb and returns a list of segments.
2695 *
2696 *      It may return NULL if the skb requires no segmentation.  This is
2697 *      only possible when GSO is used for verifying header integrity.
2698 *
2699 *      Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2700 */
2701struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2702                                  netdev_features_t features, bool tx_path)
2703{
2704        if (unlikely(skb_needs_check(skb, tx_path))) {
2705                int err;
2706
2707                skb_warn_bad_offload(skb);
2708
2709                err = skb_cow_head(skb, 0);
2710                if (err < 0)
2711                        return ERR_PTR(err);
2712        }
2713
2714        BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2715                     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2716
2717        SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2718        SKB_GSO_CB(skb)->encap_level = 0;
2719
2720        skb_reset_mac_header(skb);
2721        skb_reset_mac_len(skb);
2722
2723        return skb_mac_gso_segment(skb, features);
2724}
2725EXPORT_SYMBOL(__skb_gso_segment);
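/* Example (illustrative sketch, hypothetical driver): software GSO fallback
 * using the tx-path wrapper skb_gso_segment(), mirroring what
 * validate_xmit_skb() below does for the stack.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;	// walk skb->next and transmit each segment
 *	}
 */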
2726
2727/* Take action when hardware reception checksum errors are detected. */
2728#ifdef CONFIG_BUG
2729void netdev_rx_csum_fault(struct net_device *dev)
2730{
2731        if (net_ratelimit()) {
2732                pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2733                dump_stack();
2734        }
2735}
2736EXPORT_SYMBOL(netdev_rx_csum_fault);
2737#endif
2738
2739/* Actually, we should eliminate this check as soon as we know that:
2740 * 1. An IOMMU is present and allows mapping all the memory.
2741 * 2. No high memory really exists on this machine.
2742 */
2743
2744static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2745{
2746#ifdef CONFIG_HIGHMEM
2747        int i;
2748        if (!(dev->features & NETIF_F_HIGHDMA)) {
2749                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2750                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2751                        if (PageHighMem(skb_frag_page(frag)))
2752                                return 1;
2753                }
2754        }
2755
2756        if (PCI_DMA_BUS_IS_PHYS) {
2757                struct device *pdev = dev->dev.parent;
2758
2759                if (!pdev)
2760                        return 0;
2761                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2762                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2763                        dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2764                        if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2765                                return 1;
2766                }
2767        }
2768#endif
2769        return 0;
2770}
2771
2772/* If MPLS offload request, verify we are testing hardware MPLS features
2773 * instead of standard features for the netdev.
2774 */
2775#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2776static netdev_features_t net_mpls_features(struct sk_buff *skb,
2777                                           netdev_features_t features,
2778                                           __be16 type)
2779{
2780        if (eth_p_mpls(type))
2781                features &= skb->dev->mpls_features;
2782
2783        return features;
2784}
2785#else
2786static netdev_features_t net_mpls_features(struct sk_buff *skb,
2787                                           netdev_features_t features,
2788                                           __be16 type)
2789{
2790        return features;
2791}
2792#endif
2793
2794static netdev_features_t harmonize_features(struct sk_buff *skb,
2795        netdev_features_t features)
2796{
2797        int tmp;
2798        __be16 type;
2799
2800        type = skb_network_protocol(skb, &tmp);
2801        features = net_mpls_features(skb, features, type);
2802
2803        if (skb->ip_summed != CHECKSUM_NONE &&
2804            !can_checksum_protocol(features, type)) {
2805                features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2806        } else if (illegal_highdma(skb->dev, skb)) {
2807                features &= ~NETIF_F_SG;
2808        }
2809
2810        return features;
2811}
2812
2813netdev_features_t passthru_features_check(struct sk_buff *skb,
2814                                          struct net_device *dev,
2815                                          netdev_features_t features)
2816{
2817        return features;
2818}
2819EXPORT_SYMBOL(passthru_features_check);
2820
2821static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2822                                             struct net_device *dev,
2823                                             netdev_features_t features)
2824{
2825        return vlan_features_check(skb, features);
2826}
2827
2828netdev_features_t netif_skb_features(struct sk_buff *skb)
2829{
2830        struct net_device *dev = skb->dev;
2831        netdev_features_t features = dev->features;
2832        u16 gso_segs = skb_shinfo(skb)->gso_segs;
2833
2834        if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2835                features &= ~NETIF_F_GSO_MASK;
2836
2837        /* If encapsulation offload request, verify we are testing
2838         * hardware encapsulation features instead of standard
2839         * features for the netdev
2840         */
2841        if (skb->encapsulation)
2842                features &= dev->hw_enc_features;
2843
2844        if (skb_vlan_tagged(skb))
2845                features = netdev_intersect_features(features,
2846                                                     dev->vlan_features |
2847                                                     NETIF_F_HW_VLAN_CTAG_TX |
2848                                                     NETIF_F_HW_VLAN_STAG_TX);
2849
2850        if (dev->netdev_ops->ndo_features_check)
2851                features &= dev->netdev_ops->ndo_features_check(skb, dev,
2852                                                                features);
2853        else
2854                features &= dflt_features_check(skb, dev, features);
2855
2856        return harmonize_features(skb, features);
2857}
2858EXPORT_SYMBOL(netif_skb_features);
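/* Example (illustrative sketch, hypothetical driver): a conservative
 * .ndo_features_check.  Drivers that can offload anything for tunnels just
 * point this hook at passthru_features_check() above.
 *
 *	static netdev_features_t example_features_check(struct sk_buff *skb,
 *							struct net_device *dev,
 *							netdev_features_t features)
 *	{
 *		if (skb->encapsulation)		// no tunnel offloads in this hw
 *			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 *		return vlan_features_check(skb, features);
 *	}
 */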
2859
2860static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2861                    struct netdev_queue *txq, bool more)
2862{
2863        unsigned int len;
2864        int rc;
2865
2866        if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2867                dev_queue_xmit_nit(skb, dev);
2868
2869        len = skb->len;
2870        trace_net_dev_start_xmit(skb, dev);
2871        rc = netdev_start_xmit(skb, dev, txq, more);
2872        trace_net_dev_xmit(skb, rc, dev, len);
2873
2874        return rc;
2875}
2876
2877struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2878                                    struct netdev_queue *txq, int *ret)
2879{
2880        struct sk_buff *skb = first;
2881        int rc = NETDEV_TX_OK;
2882
2883        while (skb) {
2884                struct sk_buff *next = skb->next;
2885
2886                skb->next = NULL;
2887                rc = xmit_one(skb, dev, txq, next != NULL);
2888                if (unlikely(!dev_xmit_complete(rc))) {
2889                        skb->next = next;
2890                        goto out;
2891                }
2892
2893                skb = next;
2894                if (netif_xmit_stopped(txq) && skb) {
2895                        rc = NETDEV_TX_BUSY;
2896                        break;
2897                }
2898        }
2899
2900out:
2901        *ret = rc;
2902        return skb;
2903}
2904
2905static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2906                                          netdev_features_t features)
2907{
2908        if (skb_vlan_tag_present(skb) &&
2909            !vlan_hw_offload_capable(features, skb->vlan_proto))
2910                skb = __vlan_hwaccel_push_inside(skb);
2911        return skb;
2912}
2913
2914static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2915{
2916        netdev_features_t features;
2917
2918        if (skb->next)
2919                return skb;
2920
2921        features = netif_skb_features(skb);
2922        skb = validate_xmit_vlan(skb, features);
2923        if (unlikely(!skb))
2924                goto out_null;
2925
2926        if (netif_needs_gso(skb, features)) {
2927                struct sk_buff *segs;
2928
2929                segs = skb_gso_segment(skb, features);
2930                if (IS_ERR(segs)) {
2931                        goto out_kfree_skb;
2932                } else if (segs) {
2933                        consume_skb(skb);
2934                        skb = segs;
2935                }
2936        } else {
2937                if (skb_needs_linearize(skb, features) &&
2938                    __skb_linearize(skb))
2939                        goto out_kfree_skb;
2940
2941                /* If packet is not checksummed and device does not
2942                 * support checksumming for this protocol, complete
2943                 * checksumming here.
2944                 */
2945                if (skb->ip_summed == CHECKSUM_PARTIAL) {
2946                        if (skb->encapsulation)
2947                                skb_set_inner_transport_header(skb,
2948                                                               skb_checksum_start_offset(skb));
2949                        else
2950                                skb_set_transport_header(skb,
2951                                                         skb_checksum_start_offset(skb));
2952                        if (!(features & NETIF_F_CSUM_MASK) &&
2953                            skb_checksum_help(skb))
2954                                goto out_kfree_skb;
2955                }
2956        }
2957
2958        return skb;
2959
2960out_kfree_skb:
2961        kfree_skb(skb);
2962out_null:
2963        return NULL;
2964}
2965
2966struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2967{
2968        struct sk_buff *next, *head = NULL, *tail;
2969
2970        for (; skb != NULL; skb = next) {
2971                next = skb->next;
2972                skb->next = NULL;
2973
2974                /* in case skb won't be segmented, point to itself */
2975                skb->prev = skb;
2976
2977                skb = validate_xmit_skb(skb, dev);
2978                if (!skb)
2979                        continue;
2980
2981                if (!head)
2982                        head = skb;
2983                else
2984                        tail->next = skb;
2985                /* If skb was segmented, skb->prev points to
2986                 * the last segment. If not, it still contains skb.
2987                 */
2988                tail = skb->prev;
2989        }
2990        return head;
2991}
2992
2993static void qdisc_pkt_len_init(struct sk_buff *skb)
2994{
2995        const struct skb_shared_info *shinfo = skb_shinfo(skb);
2996
2997        qdisc_skb_cb(skb)->pkt_len = skb->len;
2998
2999        /* To get more precise estimation of bytes sent on wire,
3000         * we add to pkt_len the header size of all segments
3001         */
3002        if (shinfo->gso_size)  {
3003                unsigned int hdr_len;
3004                u16 gso_segs = shinfo->gso_segs;
3005
3006                /* mac layer + network layer */
3007                hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3008
3009                /* + transport layer */
3010                if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3011                        hdr_len += tcp_hdrlen(skb);
3012                else
3013                        hdr_len += sizeof(struct udphdr);
3014
3015                if (shinfo->gso_type & SKB_GSO_DODGY)
3016                        gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3017                                                shinfo->gso_size);
3018
3019                qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3020        }
3021}
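/* Worked example (illustrative numbers): a TCP GSO skb with skb->len = 5858,
 * gso_size = 1448, gso_segs = 4 and 66 bytes of Ethernet + IPv4 + TCP
 * headers yields
 *
 *	pkt_len = 5858 + (4 - 1) * 66 = 6056
 *
 * i.e. the bytes that actually hit the wire as four separate frames.
 */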
3022
3023static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3024                                 struct net_device *dev,
3025                                 struct netdev_queue *txq)
3026{
3027        spinlock_t *root_lock = qdisc_lock(q);
3028        bool contended;
3029        int rc;
3030
3031        qdisc_calculate_pkt_len(skb, q);
3032        /*
3033         * Heuristic to force contended enqueues to serialize on a
3034         * separate lock before trying to get qdisc main lock.
3035         * This permits __QDISC___STATE_RUNNING owner to get the lock more
3036         * often and dequeue packets faster.
3037         */
3038        contended = qdisc_is_running(q);
3039        if (unlikely(contended))
3040                spin_lock(&q->busylock);
3041
3042        spin_lock(root_lock);
3043        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3044                kfree_skb(skb);
3045                rc = NET_XMIT_DROP;
3046        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3047                   qdisc_run_begin(q)) {
3048                /*
3049                 * This is a work-conserving queue; there are no old skbs
3050                 * waiting to be sent out; and the qdisc is not running -
3051                 * xmit the skb directly.
3052                 */
3053
3054                qdisc_bstats_update(q, skb);
3055
3056                if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3057                        if (unlikely(contended)) {
3058                                spin_unlock(&q->busylock);
3059                                contended = false;
3060                        }
3061                        __qdisc_run(q);
3062                } else
3063                        qdisc_run_end(q);
3064
3065                rc = NET_XMIT_SUCCESS;
3066        } else {
3067                rc = q->enqueue(skb, q) & NET_XMIT_MASK;
3068                if (qdisc_run_begin(q)) {
3069                        if (unlikely(contended)) {
3070                                spin_unlock(&q->busylock);
3071                                contended = false;
3072                        }
3073                        __qdisc_run(q);
3074                }
3075        }
3076        spin_unlock(root_lock);
3077        if (unlikely(contended))
3078                spin_unlock(&q->busylock);
3079        return rc;
3080}
3081
3082#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3083static void skb_update_prio(struct sk_buff *skb)
3084{
3085        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
3086
3087        if (!skb->priority && skb->sk && map) {
3088                unsigned int prioidx =
3089                        sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
3090
3091                if (prioidx < map->priomap_len)
3092                        skb->priority = map->priomap[prioidx];
3093        }
3094}
3095#else
3096#define skb_update_prio(skb)
3097#endif
3098
3099DEFINE_PER_CPU(int, xmit_recursion);
3100EXPORT_SYMBOL(xmit_recursion);
3101
3102#define RECURSION_LIMIT 10
3103
3104/**
3105 *      dev_loopback_xmit - loop back @skb
3106 *      @net: network namespace this loopback is happening in
3107 *      @sk:  sk needed to be a netfilter okfn
3108 *      @skb: buffer to transmit
3109 */
3110int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3111{
3112        skb_reset_mac_header(skb);
3113        __skb_pull(skb, skb_network_offset(skb));
3114        skb->pkt_type = PACKET_LOOPBACK;
3115        skb->ip_summed = CHECKSUM_UNNECESSARY;
3116        WARN_ON(!skb_dst(skb));
3117        skb_dst_force(skb);
3118        netif_rx_ni(skb);
3119        return 0;
3120}
3121EXPORT_SYMBOL(dev_loopback_xmit);
3122
3123#ifdef CONFIG_NET_EGRESS
3124static struct sk_buff *
3125sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3126{
3127        struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3128        struct tcf_result cl_res;
3129
3130        if (!cl)
3131                return skb;
3132
3133        /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
3134         * earlier by the caller.
3135         */
3136        qdisc_bstats_cpu_update(cl->q, skb);
3137
3138        switch (tc_classify(skb, cl, &cl_res, false)) {
3139        case TC_ACT_OK:
3140        case TC_ACT_RECLASSIFY:
3141                skb->tc_index = TC_H_MIN(cl_res.classid);
3142                break;
3143        case TC_ACT_SHOT:
3144                qdisc_qstats_cpu_drop(cl->q);
3145                *ret = NET_XMIT_DROP;
3146                goto drop;
3147        case TC_ACT_STOLEN:
3148        case TC_ACT_QUEUED:
3149                *ret = NET_XMIT_SUCCESS;
3150drop:
3151                kfree_skb(skb);
3152                return NULL;
3153        case TC_ACT_REDIRECT:
3154                /* No need to push/pop skb's mac_header here on egress! */
3155                skb_do_redirect(skb);
3156                *ret = NET_XMIT_SUCCESS;
3157                return NULL;
3158        default:
3159                break;
3160        }
3161
3162        return skb;
3163}
3164#endif /* CONFIG_NET_EGRESS */
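
/*
 * For illustration: sch_handle_egress() above only runs once the
 * egress_needed static key is enabled, which happens when egress
 * classifiers are attached through the clsact qdisc, for example
 * (device and object file names are just example values):
 *
 *      tc qdisc add dev eth0 clsact
 *      tc filter add dev eth0 egress bpf da obj tc_prog.o
 *
 * With such a filter installed, __dev_queue_xmit() below diverts packets
 * through the classifier before handing them to the device's own qdisc.
 */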
3165
3166static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3167{
3168#ifdef CONFIG_XPS
3169        struct xps_dev_maps *dev_maps;
3170        struct xps_map *map;
3171        int queue_index = -1;
3172
3173        rcu_read_lock();
3174        dev_maps = rcu_dereference(dev->xps_maps);
3175        if (dev_maps) {
3176                map = rcu_dereference(
3177                    dev_maps->cpu_map[skb->sender_cpu - 1]);
3178                if (map) {
3179                        if (map->len == 1)
3180                                queue_index = map->queues[0];
3181                        else
3182                                queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3183                                                                           map->len)];
3184                        if (unlikely(queue_index >= dev->real_num_tx_queues))
3185                                queue_index = -1;
3186                }
3187        }
3188        rcu_read_unlock();
3189
3190        return queue_index;
3191#else
3192        return -1;
3193#endif
3194}
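
/*
 * A short worked example of the scaling used in get_xps_queue() above:
 * reciprocal_scale() from <linux/kernel.h> maps a u32 hash into
 * [0, map->len) as ((u64)hash * len) >> 32, so for instance
 *
 *      reciprocal_scale(0xC0000000, 8) == (((u64)0xC0000000 * 8) >> 32) == 6
 *
 * i.e. a hash three quarters of the way through the u32 range selects the
 * queue three quarters of the way through an 8-entry XPS map.
 */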
3195
3196static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3197{
3198        struct sock *sk = skb->sk;
3199        int queue_index = sk_tx_queue_get(sk);
3200
3201        if (queue_index < 0 || skb->ooo_okay ||
3202            queue_index >= dev->real_num_tx_queues) {
3203                int new_index = get_xps_queue(dev, skb);
3204                if (new_index < 0)
3205                        new_index = skb_tx_hash(dev, skb);
3206
3207                if (queue_index != new_index && sk &&
3208                    sk_fullsock(sk) &&
3209                    rcu_access_pointer(sk->sk_dst_cache))
3210                        sk_tx_queue_set(sk, new_index);
3211
3212                queue_index = new_index;
3213        }
3214
3215        return queue_index;
3216}
3217
3218struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3219                                    struct sk_buff *skb,
3220                                    void *accel_priv)
3221{
3222        int queue_index = 0;
3223
3224#ifdef CONFIG_XPS
3225        u32 sender_cpu = skb->sender_cpu - 1;
3226
3227        if (sender_cpu >= (u32)NR_CPUS)
3228                skb->sender_cpu = raw_smp_processor_id() + 1;
3229#endif
3230
3231        if (dev->real_num_tx_queues != 1) {
3232                const struct net_device_ops *ops = dev->netdev_ops;
3233                if (ops->ndo_select_queue)
3234                        queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3235                                                            __netdev_pick_tx);
3236                else
3237                        queue_index = __netdev_pick_tx(dev, skb);
3238
3239                if (!accel_priv)
3240                        queue_index = netdev_cap_txqueue(dev, queue_index);
3241        }
3242
3243        skb_set_queue_mapping(skb, queue_index);
3244        return netdev_get_tx_queue(dev, queue_index);
3245}
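
/*
 * A minimal sketch of how a multiqueue driver hooks into netdev_pick_tx()
 * above: if it provides .ndo_select_queue it receives __netdev_pick_tx as
 * the fallback, otherwise __netdev_pick_tx is used directly.  All my_*
 * names below are hypothetical.
 *
 *      static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
 *                                 void *accel_priv,
 *                                 select_queue_fallback_t fallback)
 *      {
 *              if (skb->priority == TC_PRIO_CONTROL)   // pin control traffic
 *                      return dev->real_num_tx_queues - 1;
 *              return fallback(dev, skb);              // default hash/XPS pick
 *      }
 *
 *      static const struct net_device_ops my_netdev_ops = {
 *              .ndo_select_queue = my_select_queue,
 *              ...
 *      };
 */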
3246
3247/**
3248 *      __dev_queue_xmit - transmit a buffer
3249 *      @skb: buffer to transmit
3250 *      @accel_priv: private data used for L2 forwarding offload
3251 *
3252 *      Queue a buffer for transmission to a network device. The caller must
3253 *      have set the device and priority and built the buffer before calling
3254 *      this function. The function can be called from an interrupt.
3255 *
3256 *      A negative errno code is returned on a failure. A success does not
3257 *      guarantee the frame will be transmitted as it may be dropped due
3258 *      to congestion or traffic shaping.
3259 *
3260 * -----------------------------------------------------------------------------------
3261 *      I notice this method can also return errors from the queue disciplines,
3262 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
3263 *      be positive.
3264 *
3265 *      Regardless of the return value, the skb is consumed, so it is currently
3266 *      difficult to retry a send to this method.  (You can bump the ref count
3267 *      before sending to hold a reference for retry if you are careful.)
3268 *
3269 *      When calling this method, interrupts MUST be enabled.  This is because
3270 *      the BH enable code must have IRQs enabled so that it will not deadlock.
3271 *          --BLG
3272 */
3273static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3274{
3275        struct net_device *dev = skb->dev;
3276        struct netdev_queue *txq;
3277        struct Qdisc *q;
3278        int rc = -ENOMEM;
3279
3280        skb_reset_mac_header(skb);
3281
3282        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3283                __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3284
3285        /* Disable soft irqs for various locks below. Also
3286         * stops preemption for RCU.
3287         */
3288        rcu_read_lock_bh();
3289
3290        skb_update_prio(skb);
3291
3292        qdisc_pkt_len_init(skb);
3293#ifdef CONFIG_NET_CLS_ACT
3294        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
3295# ifdef CONFIG_NET_EGRESS
3296        if (static_key_false(&egress_needed)) {
3297                skb = sch_handle_egress(skb, &rc, dev);
3298                if (!skb)
3299                        goto out;
3300        }
3301# endif
3302#endif
3303        /* If the device/qdisc doesn't need skb->dst, release it right now while
3304         * it's hot in this CPU's cache.
3305         */
3306        if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3307                skb_dst_drop(skb);
3308        else
3309                skb_dst_force(skb);
3310
3311#ifdef CONFIG_NET_SWITCHDEV
3312        /* Don't forward if offload device already forwarded */
3313        if (skb->offload_fwd_mark &&
3314            skb->offload_fwd_mark == dev->offload_fwd_mark) {
3315                consume_skb(skb);
3316                rc = NET_XMIT_SUCCESS;
3317                goto out;
3318        }
3319#endif
3320
3321        txq = netdev_pick_tx(dev, skb, accel_priv);
3322        q = rcu_dereference_bh(txq->qdisc);
3323
3324        trace_net_dev_queue(skb);
3325        if (q->enqueue) {
3326                rc = __dev_xmit_skb(skb, q, dev, txq);
3327                goto out;
3328        }
3329
3330        /* The device has no queue. Common case for software devices:
3331           loopback, all the sorts of tunnels...
3332
3333           Really, it is unlikely that netif_tx_lock protection is necessary
3334           here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
3335           counters.)
3336           However, it is possible that they rely on the protection
3337           made by us here.
3338
3339           Check this and take the lock. It is not prone to deadlocks.
3340           Or shoot the noqueue qdisc; that is even simpler 8)
3341         */
3342        if (dev->flags & IFF_UP) {
3343                int cpu = smp_processor_id(); /* ok because BHs are off */
3344
3345                if (txq->xmit_lock_owner != cpu) {
3346
3347                        if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3348                                goto recursion_alert;
3349
3350                        skb = validate_xmit_skb(skb, dev);
3351                        if (!skb)
3352                                goto drop;
3353
3354                        HARD_TX_LOCK(dev, txq, cpu);
3355
3356                        if (!netif_xmit_stopped(txq)) {
3357                                __this_cpu_inc(xmit_recursion);
3358                                skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3359                                __this_cpu_dec(xmit_recursion);
3360                                if (dev_xmit_complete(rc)) {
3361                                        HARD_TX_UNLOCK(dev, txq);
3362                                        goto out;
3363                                }
3364                        }
3365                        HARD_TX_UNLOCK(dev, txq);
3366                        net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3367                                             dev->name);
3368                } else {
3369                        /* Recursion is detected! It is possible,
3370                         * unfortunately
3371                         */
3372recursion_alert:
3373                        net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3374                                             dev->name);
3375                }
3376        }
3377
3378        rc = -ENETDOWN;
3379drop:
3380        rcu_read_unlock_bh();
3381
3382        atomic_long_inc(&dev->tx_dropped);
3383        kfree_skb_list(skb);
3384        return rc;
3385out:
3386        rcu_read_unlock_bh();
3387        return rc;
3388}
3389
3390int dev_queue_xmit(struct sk_buff *skb)
3391{
3392        return __dev_queue_xmit(skb, NULL);
3393}
3394EXPORT_SYMBOL(dev_queue_xmit);
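
/*
 * A minimal sketch of a typical dev_queue_xmit() caller (for example a
 * tunnel-style transmit path); my_build_payload() and priv are hypothetical
 * and error handling is reduced to the essentials.
 *
 *      struct sk_buff *skb;
 *
 *      skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *      my_build_payload(skb, len);     // fills skb->data, sets skb->protocol
 *      skb->dev = dev;
 *      skb->priority = TC_PRIO_CONTROL;
 *
 *      // The skb is consumed even on error; NET_XMIT_* codes are positive.
 *      if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *              priv->tx_errors++;      // do not free the skb again here
 */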
3395
3396int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3397{
3398        return __dev_queue_xmit(skb, accel_priv);
3399}
3400EXPORT_SYMBOL(dev_queue_xmit_accel);
3401
3402
3403/*=======================================================================
3404                        Receiver routines
3405  =======================================================================*/
3406
3407int netdev_max_backlog __read_mostly = 1000;
3408EXPORT_SYMBOL(netdev_max_backlog);
3409
3410int netdev_tstamp_prequeue __read_mostly = 1;
3411int netdev_budget __read_mostly = 300;
3412int weight_p __read_mostly = 64;            /* old backlog weight */
3413
3414/* Called with irq disabled */
3415static inline void ____napi_schedule(struct softnet_data *sd,
3416                                     struct napi_struct *napi)
3417{
3418        list_add_tail(&napi->poll_list, &sd->poll_list);
3419        __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3420}
3421
3422#ifdef CONFIG_RPS
3423
3424/* One global table that all flow-based protocols share. */
3425struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3426EXPORT_SYMBOL(rps_sock_flow_table);
3427u32 rps_cpu_mask __read_mostly;
3428EXPORT_SYMBOL(rps_cpu_mask);
3429
3430struct static_key rps_needed __read_mostly;
3431
3432static struct rps_dev_flow *
3433set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3434            struct rps_dev_flow *rflow, u16 next_cpu)
3435{
3436        if (next_cpu < nr_cpu_ids) {
3437#ifdef CONFIG_RFS_ACCEL
3438                struct netdev_rx_queue *rxqueue;
3439                struct rps_dev_flow_table *flow_table;
3440                struct rps_dev_flow *old_rflow;
3441                u32 flow_id;
3442                u16 rxq_index;
3443                int rc;
3444
3445                /* Should we steer this flow to a different hardware queue? */
3446                if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3447                    !(dev->features & NETIF_F_NTUPLE))
3448                        goto out;
3449                rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3450                if (rxq_index == skb_get_rx_queue(skb))
3451                        goto out;
3452
3453                rxqueue = dev->_rx + rxq_index;
3454                flow_table = rcu_dereference(rxqueue->rps_flow_table);
3455                if (!flow_table)
3456                        goto out;
3457                flow_id = skb_get_hash(skb) & flow_table->mask;
3458                rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3459                                                        rxq_index, flow_id);
3460                if (rc < 0)
3461                        goto out;
3462                old_rflow = rflow;
3463                rflow = &flow_table->flows[flow_id];
3464                rflow->filter = rc;
3465                if (old_rflow->filter == rflow->filter)
3466                        old_rflow->filter = RPS_NO_FILTER;
3467        out:
3468#endif
3469                rflow->last_qtail =
3470                        per_cpu(softnet_data, next_cpu).input_queue_head;
3471        }
3472
3473        rflow->cpu = next_cpu;
3474        return rflow;
3475}
3476
3477/*
3478 * get_rps_cpu is called from netif_receive_skb and returns the target
3479 * CPU from the RPS map of the receiving queue for a given skb.
3480 * rcu_read_lock must be held on entry.
3481 */
3482static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3483                       struct rps_dev_flow **rflowp)
3484{
3485        const struct rps_sock_flow_table *sock_flow_table;
3486        struct netdev_rx_queue *rxqueue = dev->_rx;
3487        struct rps_dev_flow_table *flow_table;
3488        struct rps_map *map;
3489        int cpu = -1;
3490        u32 tcpu;
3491        u32 hash;
3492
3493        if (skb_rx_queue_recorded(skb)) {
3494                u16 index = skb_get_rx_queue(skb);
3495
3496                if (unlikely(index >= dev->real_num_rx_queues)) {
3497                        WARN_ONCE(dev->real_num_rx_queues > 1,
3498                                  "%s received packet on queue %u, but number "
3499                                  "of RX queues is %u\n",
3500                                  dev->name, index, dev->real_num_rx_queues);
3501                        goto done;
3502                }
3503                rxqueue += index;
3504        }
3505
3506        /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3507
3508        flow_table = rcu_dereference(rxqueue->rps_flow_table);
3509        map = rcu_dereference(rxqueue->rps_map);
3510        if (!flow_table && !map)
3511                goto done;
3512
3513        skb_reset_network_header(skb);
3514        hash = skb_get_hash(skb);
3515        if (!hash)
3516                goto done;
3517
3518        sock_flow_table = rcu_dereference(rps_sock_flow_table);
3519        if (flow_table && sock_flow_table) {
3520                struct rps_dev_flow *rflow;
3521                u32 next_cpu;
3522                u32 ident;
3523
3524                /* First check the global flow table to see if there is a match */
3525                ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3526                if ((ident ^ hash) & ~rps_cpu_mask)
3527                        goto try_rps;
3528
3529                next_cpu = ident & rps_cpu_mask;
3530
3531                /* OK, now we know there is a match,
3532                 * we can look at the local (per receive queue) flow table
3533                 */
3534                rflow = &flow_table->flows[hash & flow_table->mask];
3535                tcpu = rflow->cpu;
3536
3537                /*
3538                 * If the desired CPU (where last recvmsg was done) is
3539                 * different from current CPU (one in the rx-queue flow
3540                 * table entry), switch if one of the following holds:
3541                 *   - Current CPU is unset (>= nr_cpu_ids).
3542                 *   - Current CPU is offline.
3543                 *   - The current CPU's queue tail has advanced beyond the
3544                 *     last packet that was enqueued using this table entry.
3545                 *     This guarantees that all previous packets for the flow
3546                 *     have been dequeued, thus preserving in order delivery.
3547                 */
3548                if (unlikely(tcpu != next_cpu) &&
3549                    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3550                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3551                      rflow->last_qtail)) >= 0)) {
3552                        tcpu = next_cpu;
3553                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3554                }
3555
3556                if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3557                        *rflowp = rflow;
3558                        cpu = tcpu;
3559                        goto done;
3560                }
3561        }
3562
3563try_rps:
3564
3565        if (map) {
3566                tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3567                if (cpu_online(tcpu)) {
3568                        cpu = tcpu;
3569                        goto done;
3570                }
3571        }
3572
3573done:
3574        return cpu;
3575}
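
/*
 * For illustration, the ident check in get_rps_cpu() above: each entry of
 * the global rps_sock_flow_table packs the upper bits of the flow hash
 * together with the CPU that last ran recvmsg() for that flow (that is
 * what the socket layer records via rps_record_sock_flow()).  With
 * rps_cpu_mask == 0xff, a flow hash of 0x12345678 recorded on CPU 3 is
 * stored as
 *
 *      ident = (0x12345678 & ~0xff) | 3 == 0x12345603
 *
 * so "(ident ^ hash) & ~rps_cpu_mask" being zero confirms the hash part
 * matches, and "ident & rps_cpu_mask" recovers the desired CPU (3).
 */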
3576
3577#ifdef CONFIG_RFS_ACCEL
3578
3579/**
3580 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3581 * @dev: Device on which the filter was set
3582 * @rxq_index: RX queue index
3583 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3584 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3585 *
3586 * Drivers that implement ndo_rx_flow_steer() should periodically call
3587 * this function for each installed filter and remove the filters for
3588 * which it returns %true.
3589 */
3590bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3591                         u32 flow_id, u16 filter_id)
3592{
3593        struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3594        struct rps_dev_flow_table *flow_table;
3595        struct rps_dev_flow *rflow;
3596        bool expire = true;
3597        unsigned int cpu;
3598
3599        rcu_read_lock();
3600        flow_table = rcu_dereference(rxqueue->rps_flow_table);
3601        if (flow_table && flow_id <= flow_table->mask) {
3602                rflow = &flow_table->flows[flow_id];
3603                cpu = ACCESS_ONCE(rflow->cpu);
3604                if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3605                    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3606                           rflow->last_qtail) <
3607                     (int)(10 * flow_table->mask)))
3608                        expire = false;
3609        }
3610        rcu_read_unlock();
3611        return expire;
3612}
3613EXPORT_SYMBOL(rps_may_expire_flow);
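
/*
 * A minimal sketch of the periodic scan that the kernel-doc above asks
 * drivers for; the my_* structures and helpers are hypothetical stand-ins
 * for a driver's own RFS filter bookkeeping.
 *
 *      static void my_rfs_expire_work(struct my_nic *nic)
 *      {
 *              unsigned int i;
 *
 *              for (i = 0; i < nic->n_rfs_filters; i++) {
 *                      struct my_rfs_filter *f = &nic->rfs_filters[i];
 *
 *                      if (!f->installed)
 *                              continue;
 *                      if (rps_may_expire_flow(nic->netdev, f->rxq_index,
 *                                              f->flow_id, f->filter_id))
 *                              my_remove_hw_filter(nic, f);  // free HW slot
 *              }
 *      }
 */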
3614
3615#endif /* CONFIG_RFS_ACCEL */
3616
3617/* Called from hardirq (IPI) context */
3618static void rps_trigger_softirq(void *data)
3619{
3620        struct softnet_data *sd = data;
3621
3622        ____napi_schedule(sd, &sd->backlog);
3623        sd->received_rps++;
3624}
3625
3626#endif /* CONFIG_RPS */
3627
3628/*
3629 * Check if this softnet_data structure belongs to another CPU.
3630 * If yes, queue it to our IPI list and return 1;
3631 * if no, return 0.
3632 */
3633static int rps_ipi_queued(struct softnet_data *sd)
3634{
3635#ifdef CONFIG_RPS
3636        struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3637
3638        if (sd != mysd) {
3639                sd->rps_ipi_next = mysd->rps_ipi_list;
3640                mysd->rps_ipi_list = sd;
3641
3642                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3643                return 1;
3644        }
3645#endif /* CONFIG_RPS */
3646        return 0;
3647}
3648
3649#ifdef CONFIG_NET_FLOW_LIMIT
3650int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3651#endif
3652
3653static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3654{
3655#ifdef CONFIG_NET_FLOW_LIMIT
3656        struct sd_flow_limit *fl;
3657        struct softnet_data *sd;
3658        unsigned int old_flow, new_flow;
3659
3660        if (qlen < (netdev_max_backlog >> 1))
3661                return false;
3662
3663        sd = this_cpu_ptr(&softnet_data);
3664
3665        rcu_read_lock();
3666        fl = rcu_dereference(sd->flow_limit);
3667        if (fl) {
3668                new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3669                old_flow = fl->history[fl->history_head];
3670                fl->history[fl->history_head] = new_flow;
3671
3672                fl->history_head++;
3673                fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3674
3675                if (likely(fl->buckets[old_flow]))
3676                        fl->buckets[old_flow]--;
3677
3678                if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3679                        fl->count++;
3680                        rcu_read_unlock();
3681                        return true;
3682                }
3683        }
3684        rcu_read_unlock();
3685#endif
3686        return false;
3687}
3688
3689/*
3690 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3691 * queue (may be a remote CPU queue).
3692 */
3693static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3694                              unsigned int *qtail)
3695{
3696        struct softnet_data *sd;
3697        unsigned long flags;
3698        unsigned int qlen;
3699
3700        sd = &per_cpu(softnet_data, cpu);
3701
3702        local_irq_save(flags);
3703
3704        rps_lock(sd);
3705        if (!netif_running(skb->dev))
3706                goto drop;
3707        qlen = skb_queue_len(&sd->input_pkt_queue);
3708        if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3709                if (qlen) {
3710enqueue:
3711                        __skb_queue_tail(&sd->input_pkt_queue, skb);
3712                        input_queue_tail_incr_save(sd, qtail);
3713                        rps_unlock(sd);
3714                        local_irq_restore(flags);
3715                        return NET_RX_SUCCESS;
3716                }
3717
3718                /* Schedule NAPI for backlog device
3719                 * We can use a non-atomic operation since we own the queue lock
3720                 */
3721                if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3722                        if (!rps_ipi_queued(sd))
3723                                ____napi_schedule(sd, &sd->backlog);
3724                }
3725                goto enqueue;
3726        }
3727
3728drop:
3729        sd->dropped++;
3730        rps_unlock(sd);
3731
3732        local_irq_restore(flags);
3733
3734        atomic_long_inc(&skb->dev->rx_dropped);
3735        kfree_skb(skb);
3736        return NET_RX_DROP;
3737}
3738
3739static int netif_rx_internal(struct sk_buff *skb)
3740{
3741        int ret;
3742
3743        net_timestamp_check(netdev_tstamp_prequeue, skb);
3744
3745        trace_netif_rx(skb);
3746#ifdef CONFIG_RPS
3747        if (static_key_false(&rps_needed)) {
3748                struct rps_dev_flow voidflow, *rflow = &voidflow;
3749                int cpu;
3750
3751                preempt_disable();
3752                rcu_read_lock();
3753
3754                cpu = get_rps_cpu(skb->dev, skb, &rflow);
3755                if (cpu < 0)
3756                        cpu = smp_processor_id();
3757
3758                ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3759
3760                rcu_read_unlock();
3761                preempt_enable();
3762        } else
3763#endif
3764        {
3765                unsigned int qtail;
3766                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3767                put_cpu();
3768        }
3769        return ret;
3770}
3771
3772/**
3773 *      netif_rx        -       post buffer to the network code
3774 *      @skb: buffer to post
3775 *
3776 *      This function receives a packet from a device driver and queues it for
3777 *      the upper (protocol) levels to process.  It always succeeds. The buffer
3778 *      may be dropped during processing for congestion control or by the
3779 *      protocol layers.
3780 *
3781 *      return values:
3782 *      NET_RX_SUCCESS  (no congestion)
3783 *      NET_RX_DROP     (packet was dropped)
3784 *
3785 */
3786
3787int netif_rx(struct sk_buff *skb)
3788{
3789        trace_netif_rx_entry(skb);
3790
3791        return netif_rx_internal(skb);
3792}
3793EXPORT_SYMBOL(netif_rx);
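
/*
 * A minimal sketch of the classic non-NAPI receive path feeding netif_rx(),
 * e.g. from a legacy driver's interrupt handler; my_hw_copy_packet() and
 * priv are hypothetical.
 *
 *      skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *      if (!skb) {
 *              dev->stats.rx_dropped++;
 *              return;
 *      }
 *      my_hw_copy_packet(priv, skb_put(skb, pkt_len)); // copy frame data
 *      skb->protocol = eth_type_trans(skb, dev);       // also sets skb->dev
 *      netif_rx(skb);                                  // queue to the backlog
 */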
3794
3795int netif_rx_ni(struct sk_buff *skb)
3796{
3797        int err;
3798
3799        trace_netif_rx_ni_entry(skb);
3800
3801        preempt_disable();
3802        err = netif_rx_internal(skb);
3803        if (local_softirq_pending())
3804                do_softirq();
3805        preempt_enable();
3806
3807        return err;
3808}
3809EXPORT_SYMBOL(netif_rx_ni);
3810
3811static void net_tx_action(struct softirq_action *h)
3812{
3813        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3814
3815        if (sd->completion_queue) {
3816                struct sk_buff *clist;
3817
3818                local_irq_disable();
3819                clist = sd->completion_queue;
3820                sd->completion_queue = NULL;
3821                local_irq_enable();
3822
3823                while (clist) {
3824                        struct sk_buff *skb = clist;
3825                        clist = clist->next;
3826
3827                        WARN_ON(atomic_read(&skb->users));
3828                        if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3829                                trace_consume_skb(skb);
3830                        else
3831                                trace_kfree_skb(skb, net_tx_action);
3832
3833                        if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
3834                                __kfree_skb(skb);
3835                        else
3836                                __kfree_skb_defer(skb);
3837                }
3838
3839                __kfree_skb_flush();
3840        }
3841
3842        if (sd->output_queue) {
3843                struct Qdisc *head;
3844
3845                local_irq_disable();
3846                head = sd->output_queue;
3847                sd->output_queue = NULL;
3848                sd->output_queue_tailp = &sd->output_queue;
3849                local_irq_enable();
3850
3851                while (head) {
3852                        struct Qdisc *q = head;
3853                        spinlock_t *root_lock;
3854
3855                        head = head->next_sched;
3856
3857                        root_lock = qdisc_lock(q);
3858                        if (spin_trylock(root_lock)) {
3859                                smp_mb__before_atomic();
3860                                clear_bit(__QDISC_STATE_SCHED,
3861                                          &q->state);
3862                                qdisc_run(q);
3863                                spin_unlock(root_lock);
3864                        } else {
3865                                if (!test_bit(__QDISC_STATE_DEACTIVATED,
3866                                              &q->state)) {
3867                                        __netif_reschedule(q);
3868                                } else {
3869                                        smp_mb__before_atomic();
3870                                        clear_bit(__QDISC_STATE_SCHED,
3871                                                  &q->state);
3872                                }
3873                        }
3874                }
3875        }
3876}
3877
3878#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3879    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3880/* This hook is defined here for ATM LANE */
3881int (*br_fdb_test_addr_hook)(struct net_device *dev,
3882                             unsigned char *addr) __read_mostly;
3883EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3884#endif
3885
3886static inline struct sk_buff *
3887sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3888                   struct net_device *orig_dev)
3889{
3890#ifdef CONFIG_NET_CLS_ACT
3891        struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3892        struct tcf_result cl_res;
3893
3894        /* If there's at least one ingress present somewhere (so
3895         * we get here via enabled static key), remaining devices
3896         * that are not configured with an ingress qdisc will bail
3897         * out here.
3898         */
3899        if (!cl)
3900                return skb;
3901        if (*pt_prev) {
3902                *ret = deliver_skb(skb, *pt_prev, orig_dev);
3903                *pt_prev = NULL;
3904        }
3905
3906        qdisc_skb_cb(skb)->pkt_len = skb->len;
3907        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3908        qdisc_bstats_cpu_update(cl->q, skb);
3909
3910        switch (tc_classify(skb, cl, &cl_res, false)) {
3911        case TC_ACT_OK:
3912        case TC_ACT_RECLASSIFY:
3913                skb->tc_index = TC_H_MIN(cl_res.classid);
3914                break;
3915        case TC_ACT_SHOT:
3916                qdisc_qstats_cpu_drop(cl->q);
3917        case TC_ACT_STOLEN:
3918        case TC_ACT_QUEUED:
3919                kfree_skb(skb);
3920                return NULL;
3921        case TC_ACT_REDIRECT:
3922                /* skb_mac_header check was done by cls/act_bpf, so
3923                 * we can safely push the L2 header back before
3924                 * redirecting to another netdev
3925                 */
3926                __skb_push(skb, skb->mac_len);
3927                skb_do_redirect(skb);
3928                return NULL;
3929        default:
3930                break;
3931        }
3932#endif /* CONFIG_NET_CLS_ACT */
3933        return skb;
3934}
3935
3936/**
3937 *      netdev_rx_handler_register - register receive handler
3938 *      @dev: device to register a handler for
3939 *      @rx_handler: receive handler to register
3940 *      @rx_handler_data: data pointer that is used by rx handler
3941 *
3942 *      Register a receive handler for a device. This handler will then be
3943 *      called from __netif_receive_skb. A negative errno code is returned
3944 *      on a failure.
3945 *
3946 *      The caller must hold the rtnl_mutex.
3947 *
3948 *      For a general description of rx_handler, see enum rx_handler_result.
3949 */
3950int netdev_rx_handler_register(struct net_device *dev,
3951                               rx_handler_func_t *rx_handler,
3952                               void *rx_handler_data)
3953{
3954        ASSERT_RTNL();
3955
3956        if (dev->rx_handler)
3957                return -EBUSY;
3958
3959        /* Note: rx_handler_data must be set before rx_handler */
3960        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3961        rcu_assign_pointer(dev->rx_handler, rx_handler);
3962
3963        return 0;
3964}
3965EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
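
/*
 * A minimal sketch of an rx_handler user in the style of bridge/bonding/
 * team; all my_* names are hypothetical.
 *
 *      static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *      {
 *              struct sk_buff *skb = *pskb;
 *              struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);
 *
 *              if (!my_port_wants(port, skb))
 *                      return RX_HANDLER_PASS;     // normal processing
 *
 *              skb->dev = port->upper_dev;         // hand to the upper device
 *              return RX_HANDLER_ANOTHER;          // re-run __netif_receive_skb
 *      }
 *
 *      // Registration, e.g. while enslaving a lower device:
 *      rtnl_lock();
 *      err = netdev_rx_handler_register(lower_dev, my_handle_frame, port);
 *      rtnl_unlock();
 */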
3966
3967/**
3968 *      netdev_rx_handler_unregister - unregister receive handler
3969 *      @dev: device to unregister a handler from
3970 *
3971 *      Unregister a receive handler from a device.
3972 *
3973 *      The caller must hold the rtnl_mutex.
3974 */
3975void netdev_rx_handler_unregister(struct net_device *dev)
3976{
3977
3978        ASSERT_RTNL();
3979        RCU_INIT_POINTER(dev->rx_handler, NULL);
3980        /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3981         * section is guaranteed to see a non-NULL rx_handler_data
3982         * as well.
3983         */
3984        synchronize_net();
3985        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3986}
3987EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3988
3989/*
3990 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3991 * the special handling of PFMEMALLOC skbs.
3992 */
3993static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3994{
3995        switch (skb->protocol) {
3996        case htons(ETH_P_ARP):
3997        case htons(ETH_P_IP):
3998        case htons(ETH_P_IPV6):
3999        case htons(ETH_P_8021Q):
4000        case htons(ETH_P_8021AD):
4001                return true;
4002        default:
4003                return false;
4004        }
4005}
4006
4007static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4008                             int *ret, struct net_device *orig_dev)
4009{
4010#ifdef CONFIG_NETFILTER_INGRESS
4011        if (nf_hook_ingress_active(skb)) {
4012                if (*pt_prev) {
4013                        *ret = deliver_skb(skb, *pt_prev, orig_dev);
4014                        *pt_prev = NULL;
4015                }
4016
4017                return nf_hook_ingress(skb);
4018        }
4019#endif /* CONFIG_NETFILTER_INGRESS */
4020        return 0;
4021}
4022
4023static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
4024{
4025        struct packet_type *ptype, *pt_prev;
4026        rx_handler_func_t *rx_handler;
4027        struct net_device *orig_dev;
4028        bool deliver_exact = false;
4029        int ret = NET_RX_DROP;
4030        __be16 type;
4031
4032        net_timestamp_check(!netdev_tstamp_prequeue, skb);
4033
4034        trace_netif_receive_skb(skb);
4035
4036        orig_dev = skb->dev;
4037
4038        skb_reset_network_header(skb);
4039        if (!skb_transport_header_was_set(skb))
4040                skb_reset_transport_header(skb);
4041        skb_reset_mac_len(skb);
4042
4043        pt_prev = NULL;
4044
4045another_round:
4046        skb->skb_iif = skb->dev->ifindex;
4047
4048        __this_cpu_inc(softnet_data.processed);
4049
4050        if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4051            skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4052                skb = skb_vlan_untag(skb);
4053                if (unlikely(!skb))
4054                        goto out;
4055        }
4056
4057#ifdef CONFIG_NET_CLS_ACT
4058        if (skb->tc_verd & TC_NCLS) {
4059                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
4060                goto ncls;
4061        }
4062#endif
4063
4064        if (pfmemalloc)
4065                goto skip_taps;
4066
4067        list_for_each_entry_rcu(ptype, &ptype_all, list) {
4068                if (pt_prev)
4069                        ret = deliver_skb(skb, pt_prev, orig_dev);
4070                pt_prev = ptype;
4071        }
4072
4073        list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4074                if (pt_prev)
4075                        ret = deliver_skb(skb, pt_prev, orig_dev);
4076                pt_prev = ptype;
4077        }
4078
4079skip_taps:
4080#ifdef CONFIG_NET_INGRESS
4081        if (static_key_false(&ingress_needed)) {
4082                skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4083                if (!skb)
4084                        goto out;
4085
4086                if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4087                        goto out;
4088        }
4089#endif
4090#ifdef CONFIG_NET_CLS_ACT
4091        skb->tc_verd = 0;
4092ncls:
4093#endif
4094        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4095                goto drop;
4096
4097        if (skb_vlan_tag_present(skb)) {
4098                if (pt_prev) {
4099                        ret = deliver_skb(skb, pt_prev, orig_dev);
4100                        pt_prev = NULL;
4101                }
4102                if (vlan_do_receive(&skb))
4103                        goto another_round;
4104                else if (unlikely(!skb))
4105                        goto out;
4106        }
4107
4108        rx_handler = rcu_dereference(skb->dev->rx_handler);
4109        if (rx_handler) {
4110                if (pt_prev) {
4111                        ret = deliver_skb(skb, pt_prev, orig_dev);
4112                        pt_prev = NULL;
4113                }
4114                switch (rx_handler(&skb)) {
4115                case RX_HANDLER_CONSUMED:
4116                        ret = NET_RX_SUCCESS;
4117                        goto out;
4118                case RX_HANDLER_ANOTHER:
4119                        goto another_round;
4120                case RX_HANDLER_EXACT:
4121                        deliver_exact = true;
4122                case RX_HANDLER_PASS:
4123                        break;
4124                default:
4125                        BUG();
4126                }
4127        }
4128
4129        if (unlikely(skb_vlan_tag_present(skb))) {
4130                if (skb_vlan_tag_get_id(skb))
4131                        skb->pkt_type = PACKET_OTHERHOST;
4132                /* Note: we might in the future use prio bits
4133                 * and set skb->priority like in vlan_do_receive().
4134                 * For the time being, just ignore the Priority Code Point.
4135                 */
4136                skb->vlan_tci = 0;
4137        }
4138
4139        type = skb->protocol;
4140
4141        /* deliver only exact match when indicated */
4142        if (likely(!deliver_exact)) {
4143                deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4144                                       &ptype_base[ntohs(type) &
4145                                                   PTYPE_HASH_MASK]);
4146        }
4147
4148        deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4149                               &orig_dev->ptype_specific);
4150
4151        if (unlikely(skb->dev != orig_dev)) {
4152                deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4153                                       &skb->dev->ptype_specific);
4154        }
4155
4156        if (pt_prev) {
4157                if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
4158                        goto drop;
4159                else
4160                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4161        } else {
4162drop:
4163                if (!deliver_exact)
4164                        atomic_long_inc(&skb->dev->rx_dropped);
4165                else
4166                        atomic_long_inc(&skb->dev->rx_nohandler);
4167                kfree_skb(skb);
4168                /* Jamal, now you will not be able to escape explaining
4169                 * to me how you were going to use this. :-)
4170                 */
4171                ret = NET_RX_DROP;
4172        }
4173
4174out:
4175        return ret;
4176}
4177
4178static int __netif_receive_skb(struct sk_buff *skb)
4179{
4180        int ret;
4181
4182        if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4183                unsigned long pflags = current->flags;
4184
4185                /*
4186                 * PFMEMALLOC skbs are special, they should
4187                 * - be delivered to SOCK_MEMALLOC sockets only
4188                 * - stay away from userspace
4189                 * - have bounded memory usage
4190                 *
4191                 * Use PF_MEMALLOC as this saves us from propagating the allocation
4192                 * context down to all allocation sites.
4193                 */
4194                current->flags |= PF_MEMALLOC;
4195                ret = __netif_receive_skb_core(skb, true);
4196                tsk_restore_flags(current, pflags, PF_MEMALLOC);
4197        } else
4198                ret = __netif_receive_skb_core(skb, false);
4199
4200        return ret;
4201}
4202
4203static int netif_receive_skb_internal(struct sk_buff *skb)
4204{
4205        int ret;
4206
4207        net_timestamp_check(netdev_tstamp_prequeue, skb);
4208
4209        if (skb_defer_rx_timestamp(skb))
4210                return NET_RX_SUCCESS;
4211
4212        rcu_read_lock();
4213
4214#ifdef CONFIG_RPS
4215        if (static_key_false(&rps_needed)) {
4216                struct rps_dev_flow voidflow, *rflow = &voidflow;
4217                int cpu = get_rps_cpu(skb->dev, skb, &rflow);
4218
4219                if (cpu >= 0) {
4220                        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4221                        rcu_read_unlock();
4222                        return ret;
4223                }
4224        }
4225#endif
4226        ret = __netif_receive_skb(skb);
4227        rcu_read_unlock();
4228        return ret;
4229}
4230
4231/**
4232 *      netif_receive_skb - process receive buffer from network
4233 *      @skb: buffer to process
4234 *
4235 *      netif_receive_skb() is the main receive data processing function.
4236 *      It always succeeds. The buffer may be dropped during processing
4237 *      for congestion control or by the protocol layers.
4238 *
4239 *      This function may only be called from softirq context and interrupts
4240 *      should be enabled.
4241 *
4242 *      Return values (usually ignored):
4243 *      NET_RX_SUCCESS: no congestion
4244 *      NET_RX_DROP: packet was dropped
4245 */
4246int netif_receive_skb(struct sk_buff *skb)
4247{
4248        trace_netif_receive_skb_entry(skb);
4249
4250        return netif_receive_skb_internal(skb);
4251}
4252EXPORT_SYMBOL(netif_receive_skb);
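
/*
 * A minimal sketch of a NAPI poll routine delivering frames with
 * netif_receive_skb(), which (unlike netif_rx()) must run in softirq
 * context as the kernel-doc above notes; all my_* names are hypothetical.
 *
 *      static int my_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct my_ring *ring = container_of(napi, struct my_ring, napi);
 *              struct sk_buff *skb;
 *              int done = 0;
 *
 *              while (done < budget && (skb = my_ring_next_skb(ring))) {
 *                      skb->protocol = eth_type_trans(skb, ring->netdev);
 *                      netif_receive_skb(skb);
 *                      done++;
 *              }
 *              if (done < budget)
 *                      napi_complete(napi);
 *              return done;
 *      }
 */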
4253
4254/* Network device is going away, flush any packets still pending
4255 * Called with irqs disabled.
4256 */
4257static void flush_backlog(void *arg)
4258{
4259        struct net_device *dev = arg;
4260        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4261        struct sk_buff *skb, *tmp;
4262
4263        rps_lock(sd);
4264        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
4265                if (skb->dev == dev) {
4266                        __skb_unlink(skb, &sd->input_pkt_queue);
4267                        kfree_skb(skb);
4268                        input_queue_head_incr(sd);
4269                }
4270        }
4271        rps_unlock(sd);
4272
4273        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4274                if (skb->dev == dev) {
4275                        __skb_unlink(skb, &sd->process_queue);
4276                        kfree_skb(skb);
4277                        input_queue_head_incr(sd);
4278                }
4279        }
4280}
4281
4282static int napi_gro_complete(struct sk_buff *skb)
4283{
4284        struct packet_offload *ptype;
4285        __be16 type = skb->protocol;
4286        struct list_head *head = &offload_base;
4287        int err = -ENOENT;
4288
4289        BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4290
4291        if (NAPI_GRO_CB(skb)->count == 1) {
4292                skb_shinfo(skb)->gso_size = 0;
4293                goto out;
4294        }
4295
4296        rcu_read_lock();
4297        list_for_each_entry_rcu(ptype, head, list) {
4298                if (ptype->type != type || !ptype->callbacks.gro_complete)
4299                        continue;
4300
4301                err = ptype->callbacks.gro_complete(skb, 0);
4302                break;
4303        }
4304        rcu_read_unlock();
4305
4306        if (err) {
4307                WARN_ON(&ptype->list == head);
4308                kfree_skb(skb);
4309                return NET_RX_SUCCESS;
4310        }
4311
4312out:
4313        return netif_receive_skb_internal(skb);
4314}
4315
4316/* napi->gro_list contains packets ordered by age;
4317 * the youngest packets are at the head of it.
4318 * Complete skbs in reverse order to reduce latencies.
4319 */
4320void napi_gro_flush(struct napi_struct *napi, bool flush_old)
4321{
4322        struct sk_buff *skb, *prev = NULL;
4323
4324        /* scan list and build reverse chain */
4325        for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4326                skb->prev = prev;
4327                prev = skb;
4328        }
4329
4330        for (skb = prev; skb; skb = prev) {
4331                skb->next = NULL;
4332
4333                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4334                        return;
4335
4336                prev = skb->prev;
4337                napi_gro_complete(skb);
4338                napi->gro_count--;
4339        }
4340
4341        napi->gro_list = NULL;
4342}
4343EXPORT_SYMBOL(napi_gro_flush);
4344
4345static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4346{
4347        struct sk_buff *p;
4348        unsigned int maclen = skb->dev->hard_header_len;
4349        u32 hash = skb_get_hash_raw(skb);
4350
4351        for (p = napi->gro_list; p; p = p->next) {
4352                unsigned long diffs;
4353
4354                NAPI_GRO_CB(p)->flush = 0;
4355
4356                if (hash != skb_get_hash_raw(p)) {
4357                        NAPI_GRO_CB(p)->same_flow = 0;
4358                        continue;
4359                }
4360
4361                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4362                diffs |= p->vlan_tci ^ skb->vlan_tci;
4363                diffs |= skb_metadata_dst_cmp(p, skb);
4364                if (maclen == ETH_HLEN)
4365                        diffs |= compare_ether_header(skb_mac_header(p),
4366                                                      skb_mac_header(skb));
4367                else if (!diffs)
4368                        diffs = memcmp(skb_mac_header(p),
4369                                       skb_mac_header(skb),
4370                                       maclen);
4371                NAPI_GRO_CB(p)->same_flow = !diffs;
4372        }
4373}
4374
4375static void skb_gro_reset_offset(struct sk_buff *skb)
4376{
4377        const struct skb_shared_info *pinfo = skb_shinfo(skb);
4378        const skb_frag_t *frag0 = &pinfo->frags[0];
4379
4380        NAPI_GRO_CB(skb)->data_offset = 0;
4381        NAPI_GRO_CB(skb)->frag0 = NULL;
4382        NAPI_GRO_CB(skb)->frag0_len = 0;
4383
4384        if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4385            pinfo->nr_frags &&
4386            !PageHighMem(skb_frag_page(frag0))) {
4387                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4388                NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
4389        }
4390}
4391
4392static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4393{
4394        struct skb_shared_info *pinfo = skb_shinfo(skb);
4395
4396        BUG_ON(skb->end - skb->tail < grow);
4397
4398        memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4399
4400        skb->data_len -= grow;
4401        skb->tail += grow;
4402
4403        pinfo->frags[0].page_offset += grow;
4404        skb_frag_size_sub(&pinfo->frags[0], grow);
4405
4406        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4407                skb_frag_unref(skb, 0);
4408                memmove(pinfo->frags, pinfo->frags + 1,
4409                        --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4410        }
4411}
4412
4413static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4414{
4415        struct sk_buff **pp = NULL;
4416        struct packet_offload *ptype;
4417        __be16 type = skb->protocol;
4418        struct list_head *head = &offload_base;
4419        int same_flow;
4420        enum gro_result ret;
4421        int grow;
4422
4423        if (!(skb->dev->features & NETIF_F_GRO))
4424                goto normal;
4425
4426        if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4427                goto normal;
4428
4429        gro_list_prepare(napi, skb);
4430
4431        rcu_read_lock();
4432        list_for_each_entry_rcu(ptype, head, list) {
4433                if (ptype->type != type || !ptype->callbacks.gro_receive)
4434                        continue;
4435
4436                skb_set_network_header(skb, skb_gro_offset(skb));
4437                skb_reset_mac_len(skb);
4438                NAPI_GRO_CB(skb)->same_flow = 0;
4439                NAPI_GRO_CB(skb)->flush = 0;
4440                NAPI_GRO_CB(skb)->free = 0;
4441                NAPI_GRO_CB(skb)->encap_mark = 0;
4442                NAPI_GRO_CB(skb)->is_fou = 0;
4443                NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4444
4445                /* Setup for GRO checksum validation */
4446                switch (skb->ip_summed) {
4447                case CHECKSUM_COMPLETE:
4448                        NAPI_GRO_CB(skb)->csum = skb->csum;
4449                        NAPI_GRO_CB(skb)->csum_valid = 1;
4450                        NAPI_GRO_CB(skb)->csum_cnt = 0;
4451                        break;
4452                case CHECKSUM_UNNECESSARY:
4453                        NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4454                        NAPI_GRO_CB(skb)->csum_valid = 0;
4455                        break;
4456                default:
4457                        NAPI_GRO_CB(skb)->csum_cnt = 0;
4458                        NAPI_GRO_CB(skb)->csum_valid = 0;
4459                }
4460
4461                pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4462                break;
4463        }
4464        rcu_read_unlock();
4465
4466        if (&ptype->list == head)
4467                goto normal;
4468
4469        same_flow = NAPI_GRO_CB(skb)->same_flow;
4470        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4471
4472        if (pp) {
4473                struct sk_buff *nskb = *pp;
4474
4475                *pp = nskb->next;
4476                nskb->next = NULL;
4477                napi_gro_complete(nskb);
4478                napi->gro_count--;
4479        }
4480
4481        if (same_flow)
4482                goto ok;
4483
4484        if (NAPI_GRO_CB(skb)->flush)
4485                goto normal;
4486
4487        if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4488                struct sk_buff *nskb = napi->gro_list;
4489
4490                /* locate the end of the list to select the 'oldest' flow */
4491                while (nskb->next) {
4492                        pp = &nskb->next;
4493                        nskb = *pp;
4494                }
4495                *pp = NULL;
4496                nskb->next = NULL;
4497                napi_gro_complete(nskb);
4498        } else {
4499                napi->gro_count++;
4500        }
4501        NAPI_GRO_CB(skb)->count = 1;
4502        NAPI_GRO_CB(skb)->age = jiffies;
4503        NAPI_GRO_CB(skb)->last = skb;
4504        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4505        skb->next = napi->gro_list;
4506        napi->gro_list = skb;
4507        ret = GRO_HELD;
4508
4509pull:
4510        grow = skb_gro_offset(skb) - skb_headlen(skb);
4511        if (grow > 0)
4512                gro_pull_from_frag0(skb, grow);
4513ok:
4514        return ret;
4515
4516normal:
4517        ret = GRO_NORMAL;
4518        goto pull;
4519}
4520
4521struct packet_offload *gro_find_receive_by_type(__be16 type)
4522{
4523        struct list_head *offload_head = &offload_base;
4524        struct packet_offload *ptype;
4525
4526        list_for_each_entry_rcu(ptype, offload_head, list) {
4527                if (ptype->type != type || !ptype->callbacks.gro_receive)
4528                        continue;
4529                return ptype;
4530        }
4531        return NULL;
4532}
4533EXPORT_SYMBOL(gro_find_receive_by_type);
4534
4535struct packet_offload *gro_find_complete_by_type(__be16 type)
4536{
4537        struct list_head *offload_head = &offload_base;
4538        struct packet_offload *ptype;
4539
4540        list_for_each_entry_rcu(ptype, offload_head, list) {
4541                if (ptype->type != type || !ptype->callbacks.gro_complete)
4542                        continue;
4543                return ptype;
4544        }
4545        return NULL;
4546}
4547EXPORT_SYMBOL(gro_find_complete_by_type);
4548
4549static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4550{
4551        switch (ret) {
4552        case GRO_NORMAL:
4553                if (netif_receive_skb_internal(skb))
4554                        ret = GRO_DROP;
4555                break;
4556
4557        case GRO_DROP:
4558                kfree_skb(skb);
4559                break;
4560
4561        case GRO_MERGED_FREE:
4562                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4563                        skb_dst_drop(skb);
4564                        kmem_cache_free(skbuff_head_cache, skb);
4565                } else {
4566                        __kfree_skb(skb);
4567                }
4568                break;
4569
4570        case GRO_HELD:
4571        case GRO_MERGED:
4572                break;
4573        }
4574
4575        return ret;
4576}
4577
4578gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4579{
4580        skb_mark_napi_id(skb, napi);
4581        trace_napi_gro_receive_entry(skb);
4582
4583        skb_gro_reset_offset(skb);
4584
4585        return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4586}
4587EXPORT_SYMBOL(napi_gro_receive);
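
/*
 * For illustration: in a NAPI poll loop like the netif_receive_skb() sketch
 * earlier, a GRO-capable driver simply substitutes
 *
 *      skb->protocol = eth_type_trans(skb, netdev);
 *      napi_gro_receive(napi, skb);
 *
 * and dev_gro_receive() above then decides whether the skb is merged into
 * an existing flow, held on napi->gro_list, or passed up immediately.
 */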
4588
4589static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4590{
4591        if (unlikely(skb->pfmemalloc)) {
4592                consume_skb(skb);
4593                return;
4594        }
4595        __skb_pull(skb, skb_headlen(skb));
4596        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4597        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4598        skb->vlan_tci = 0;
4599        skb->dev = napi->dev;
4600        skb->skb_iif = 0;
4601        skb->encapsulation = 0;
4602        skb_shinfo(skb)->gso_type = 0;
4603        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4604
4605        napi->skb = skb;
4606}
4607
4608struct sk_buff *napi_get_frags(struct napi_struct *napi)
4609{
4610        struct sk_buff *skb = napi->skb;
4611
4612        if (!skb) {
4613                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4614                if (skb) {
4615                        napi->skb = skb;
4616                        skb_mark_napi_id(skb, napi);
4617                }
4618        }
4619        return skb;
4620}
4621EXPORT_SYMBOL(napi_get_frags);
4622
4623static gro_result_t napi_frags_finish(struct napi_struct *napi,
4624                                      struct sk_buff *skb,
4625                                      gro_result_t ret)
4626{
4627        switch (ret) {
4628        case GRO_NORMAL:
4629        case GRO_HELD:
4630                __skb_push(skb, ETH_HLEN);
4631                skb->protocol = eth_type_trans(skb, skb->dev);
4632                if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4633                        ret = GRO_DROP;
4634                break;
4635
4636        case GRO_DROP:
4637        case GRO_MERGED_FREE:
4638                napi_reuse_skb(napi, skb);
4639                break;
4640
4641        case GRO_MERGED:
4642                break;
4643        }
4644
4645        return ret;
4646}
4647
4648/* The upper GRO stack assumes the network header starts at gro_offset=0.
4649 * Drivers could call both napi_gro_frags() and napi_gro_receive().
4650 * We copy the Ethernet header into skb->data to have a common layout.
4651 */
4652static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4653{
4654        struct sk_buff *skb = napi->skb;
4655        const struct ethhdr *eth;
4656        unsigned int hlen = sizeof(*eth);
4657
4658        napi->skb = NULL;
4659
4660        skb_reset_mac_header(skb);
4661        skb_gro_reset_offset(skb);
4662
4663        eth = skb_gro_header_fast(skb, 0);
4664        if (unlikely(skb_gro_header_hard(skb, hlen))) {
4665                eth = skb_gro_header_slow(skb, hlen, 0);
4666                if (unlikely(!eth)) {
4667                        napi_reuse_skb(napi, skb);
4668                        return NULL;
4669                }
4670        } else {
4671                gro_pull_from_frag0(skb, hlen);
4672                NAPI_GRO_CB(skb)->frag0 += hlen;
4673                NAPI_GRO_CB(skb)->frag0_len -= hlen;
4674        }
4675        __skb_pull(skb, hlen);
4676
4677        /*
4678         * This works because the only protocols we care about don't require
4679         * special handling.
4680         * We'll fix it up properly in napi_frags_finish()
4681         */
4682        skb->protocol = eth->h_proto;
4683
4684        return skb;
4685}
4686
4687gro_result_t napi_gro_frags(struct napi_struct *napi)
4688{
4689        struct sk_buff *skb = napi_frags_skb(napi);
4690
4691        if (!skb)
4692                return GRO_DROP;
4693
4694        trace_napi_gro_frags_entry(skb);
4695
4696        return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4697}
4698EXPORT_SYMBOL(napi_gro_frags);
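
/* Illustrative sketch (hypothetical driver code, not part of dev.c): how a
 * page-frag based RX path is expected to pair napi_get_frags() with
 * napi_gro_frags().  my_receive_frag() and its page/offset/len arguments are
 * placeholder driver-side names.
 */
static void my_receive_frag(struct napi_struct *napi, struct page *page,
                            unsigned int offset, unsigned int len,
                            unsigned int truesize)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb))
                return;         /* out of memory: driver recycles the page */

        /* Attach the page; skb_add_rx_frag() updates len/data_len/truesize. */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        truesize);

        /* Hands napi->skb to GRO; napi_frags_skb() above pulls the ethernet
         * header and sets skb->protocol before dev_gro_receive() runs.
         */
        napi_gro_frags(napi);
}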
4699
4700/* Compute the checksum from gro_offset and return the folded value
4701 * after adding in any pseudo checksum.
4702 */
4703__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4704{
4705        __wsum wsum;
4706        __sum16 sum;
4707
4708        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4709
4710        /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4711        sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4712        if (likely(!sum)) {
4713                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4714                    !skb->csum_complete_sw)
4715                        netdev_rx_csum_fault(skb->dev);
4716        }
4717
4718        NAPI_GRO_CB(skb)->csum = wsum;
4719        NAPI_GRO_CB(skb)->csum_valid = 1;
4720
4721        return sum;
4722}
4723EXPORT_SYMBOL(__skb_gro_checksum_complete);
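
/* Hedged usage sketch (hypothetical helper, not a kernel symbol): a protocol
 * gro_receive path seeds NAPI_GRO_CB(skb)->csum with the pseudo-header sum
 * and treats a non-zero folded result as a checksum failure.
 */
static bool my_gro_checksum_ok(struct sk_buff *skb, __wsum pseudo)
{
        NAPI_GRO_CB(skb)->csum = pseudo;

        /* returns 0 when pseudo + the sum over
         * [gro_offset, gro_offset + gro_len) folds to zero,
         * i.e. the packet checksum is valid
         */
        return __skb_gro_checksum_complete(skb) == 0;
}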
4724
4725/*
4726 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
4727 * Note: called with local irq disabled, but exits with local irq enabled.
4728 */
4729static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4730{
4731#ifdef CONFIG_RPS
4732        struct softnet_data *remsd = sd->rps_ipi_list;
4733
4734        if (remsd) {
4735                sd->rps_ipi_list = NULL;
4736
4737                local_irq_enable();
4738
4739                /* Send pending IPIs to kick RPS processing on remote CPUs. */
4740                while (remsd) {
4741                        struct softnet_data *next = remsd->rps_ipi_next;
4742
4743                        if (cpu_online(remsd->cpu))
4744                                smp_call_function_single_async(remsd->cpu,
4745                                                           &remsd->csd);
4746                        remsd = next;
4747                }
4748        } else
4749#endif
4750                local_irq_enable();
4751}
4752
4753static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4754{
4755#ifdef CONFIG_RPS
4756        return sd->rps_ipi_list != NULL;
4757#else
4758        return false;
4759#endif
4760}
4761
4762static int process_backlog(struct napi_struct *napi, int quota)
4763{
4764        int work = 0;
4765        struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4766
4767        /* Check if we have pending IPIs; it's better to send them now
4768         * rather than waiting for net_rx_action() to end.
4769         */
4770        if (sd_has_rps_ipi_waiting(sd)) {
4771                local_irq_disable();
4772                net_rps_action_and_irq_enable(sd);
4773        }
4774
4775        napi->weight = weight_p;
4776        local_irq_disable();
4777        while (1) {
4778                struct sk_buff *skb;
4779
4780                while ((skb = __skb_dequeue(&sd->process_queue))) {
4781                        rcu_read_lock();
4782                        local_irq_enable();
4783                        __netif_receive_skb(skb);
4784                        rcu_read_unlock();
4785                        local_irq_disable();
4786                        input_queue_head_incr(sd);
4787                        if (++work >= quota) {
4788                                local_irq_enable();
4789                                return work;
4790                        }
4791                }
4792
4793                rps_lock(sd);
4794                if (skb_queue_empty(&sd->input_pkt_queue)) {
4795                        /*
4796                         * Inline a custom version of __napi_complete().
4797                         * Only the current cpu owns and manipulates this napi,
4798                         * and NAPI_STATE_SCHED is the only possible flag set
4799                         * on the backlog.
4800                         * We can use a plain write instead of clear_bit(),
4801                         * and we don't need an smp_mb() memory barrier.
4802                         */
4803                        napi->state = 0;
4804                        rps_unlock(sd);
4805
4806                        break;
4807                }
4808
4809                skb_queue_splice_tail_init(&sd->input_pkt_queue,
4810                                           &sd->process_queue);
4811                rps_unlock(sd);
4812        }
4813        local_irq_enable();
4814
4815        return work;
4816}
4817
4818/**
4819 * __napi_schedule - schedule for receive
4820 * @n: entry to schedule
4821 *
4822 * The entry's receive function will be scheduled to run.
4823 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4824 */
4825void __napi_schedule(struct napi_struct *n)
4826{
4827        unsigned long flags;
4828
4829        local_irq_save(flags);
4830        ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4831        local_irq_restore(flags);
4832}
4833EXPORT_SYMBOL(__napi_schedule);
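
/* Typical caller, sketched with hypothetical driver types and helpers
 * (struct my_priv, my_disable_rx_irq): the hard interrupt handler masks the
 * device's RX interrupt and defers the rest of the work to the NAPI poll loop.
 */
struct my_priv {                        /* hypothetical driver private data */
        struct napi_struct napi;
        /* ... rings, registers ... */
};

static irqreturn_t my_rx_interrupt(int irq, void *dev_id)
{
        struct my_priv *priv = dev_id;

        my_disable_rx_irq(priv);
        /* hard irqs are masked here, so the _irqoff variant applies */
        napi_schedule_irqoff(&priv->napi);

        return IRQ_HANDLED;
}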
4834
4835/**
4836 * __napi_schedule_irqoff - schedule for receive
4837 * @n: entry to schedule
4838 *
4839 * Variant of __napi_schedule() assuming hard irqs are masked
4840 */
4841void __napi_schedule_irqoff(struct napi_struct *n)
4842{
4843        ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4844}
4845EXPORT_SYMBOL(__napi_schedule_irqoff);
4846
4847void __napi_complete(struct napi_struct *n)
4848{
4849        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4850
4851        list_del_init(&n->poll_list);
4852        smp_mb__before_atomic();
4853        clear_bit(NAPI_STATE_SCHED, &n->state);
4854}
4855EXPORT_SYMBOL(__napi_complete);
4856
4857void napi_complete_done(struct napi_struct *n, int work_done)
4858{
4859        unsigned long flags;
4860
4861        /*
4862         * Don't let the napi dequeue from the cpu poll list
4863         * just in case it's running on a different cpu.
4864         */
4865        if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4866                return;
4867
4868        if (n->gro_list) {
4869                unsigned long timeout = 0;
4870
4871                if (work_done)
4872                        timeout = n->dev->gro_flush_timeout;
4873
4874                if (timeout)
4875                        hrtimer_start(&n->timer, ns_to_ktime(timeout),
4876                                      HRTIMER_MODE_REL_PINNED);
4877                else
4878                        napi_gro_flush(n, false);
4879        }
4880        if (likely(list_empty(&n->poll_list))) {
4881                WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4882        } else {
4883                /* If n->poll_list is not empty, we need to mask irqs */
4884                local_irq_save(flags);
4885                __napi_complete(n);
4886                local_irq_restore(flags);
4887        }
4888}
4889EXPORT_SYMBOL(napi_complete_done);
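
/* A minimal poll() sketch built around napi_complete_done(); it reuses the
 * hypothetical struct my_priv above, and my_clean_rx()/my_enable_rx_irq()
 * are placeholder driver helpers.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work_done = my_clean_rx(priv, budget);      /* <= budget packets */

        if (work_done < budget) {
                /* RX is drained: napi_complete_done() flushes (or arms a
                 * timer for) any held GRO packets and clears SCHED, after
                 * which the device interrupt can be re-enabled.
                 */
                napi_complete_done(napi, work_done);
                my_enable_rx_irq(priv);
        }

        return work_done;
}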
4890
4891/* must be called under rcu_read_lock(), as we don't take a reference */
4892static struct napi_struct *napi_by_id(unsigned int napi_id)
4893{
4894        unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4895        struct napi_struct *napi;
4896
4897        hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4898                if (napi->napi_id == napi_id)
4899                        return napi;
4900
4901        return NULL;
4902}
4903
4904#if defined(CONFIG_NET_RX_BUSY_POLL)
4905#define BUSY_POLL_BUDGET 8
4906bool sk_busy_loop(struct sock *sk, int nonblock)
4907{
4908        unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
4909        int (*busy_poll)(struct napi_struct *dev);
4910        struct napi_struct *napi;
4911        int rc = false;
4912
4913        rcu_read_lock();
4914
4915        napi = napi_by_id(sk->sk_napi_id);
4916        if (!napi)
4917                goto out;
4918
4919        /* Note: ndo_busy_poll method is optional in linux-4.5 */
4920        busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
4921
4922        do {
4923                rc = 0;
4924                local_bh_disable();
4925                if (busy_poll) {
4926                        rc = busy_poll(napi);
4927                } else if (napi_schedule_prep(napi)) {
4928                        void *have = netpoll_poll_lock(napi);
4929
4930                        if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4931                                rc = napi->poll(napi, BUSY_POLL_BUDGET);
4932                                trace_napi_poll(napi);
4933                                if (rc == BUSY_POLL_BUDGET) {
4934                                        napi_complete_done(napi, rc);
4935                                        napi_schedule(napi);
4936                                }
4937                        }
4938                        netpoll_poll_unlock(have);
4939                }
4940                if (rc > 0)
4941                        NET_ADD_STATS_BH(sock_net(sk),
4942                                         LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4943                local_bh_enable();
4944
4945                if (rc == LL_FLUSH_FAILED)
4946                        break; /* permanent failure */
4947
4948                cpu_relax();
4949        } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4950                 !need_resched() && !busy_loop_timeout(end_time));
4951
4952        rc = !skb_queue_empty(&sk->sk_receive_queue);
4953out:
4954        rcu_read_unlock();
4955        return rc;
4956}
4957EXPORT_SYMBOL(sk_busy_loop);
4958
4959#endif /* CONFIG_NET_RX_BUSY_POLL */
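
/* Userspace-side sketch (not kernel code, kept under #if 0): a socket opts
 * into the busy-poll path above with the SO_BUSY_POLL option (or the
 * net.core.busy_read sysctl); the value is the busy-wait budget in
 * microseconds.
 */
#if 0
#include <sys/socket.h>

static void enable_busy_poll(int fd, int usecs)
{
        /* a non-zero budget lets a blocking recv()/poll() spin in
         * sk_busy_loop() for up to usecs before sleeping
         */
        setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
}
#endif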
4960
4961void napi_hash_add(struct napi_struct *napi)
4962{
4963        if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
4964            test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
4965                return;
4966
4967        spin_lock(&napi_hash_lock);
4968
4969        /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4970        do {
4971                if (unlikely(++napi_gen_id < NR_CPUS + 1))
4972                        napi_gen_id = NR_CPUS + 1;
4973        } while (napi_by_id(napi_gen_id));
4974        napi->napi_id = napi_gen_id;
4975
4976        hlist_add_head_rcu(&napi->napi_hash_node,
4977                           &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4978
4979        spin_unlock(&napi_hash_lock);
4980}
4981EXPORT_SYMBOL_GPL(napi_hash_add);
4982
4983/* Warning: the caller is responsible for making sure an RCU grace period
4984 * elapses before freeing the memory containing @napi
4985 */
4986bool napi_hash_del(struct napi_struct *napi)
4987{
4988        bool rcu_sync_needed = false;
4989
4990        spin_lock(&napi_hash_lock);
4991
4992        if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
4993                rcu_sync_needed = true;
4994                hlist_del_rcu(&napi->napi_hash_node);
4995        }
4996        spin_unlock(&napi_hash_lock);
4997        return rcu_sync_needed;
4998}
4999EXPORT_SYMBOL_GPL(napi_hash_del);
5000
5001static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
5002{
5003        struct napi_struct *napi;
5004
5005        napi = container_of(timer, struct napi_struct, timer);
5006        if (napi->gro_list)
5007                napi_schedule(napi);
5008
5009        return HRTIMER_NORESTART;
5010}
5011
5012void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5013                    int (*poll)(struct napi_struct *, int), int weight)
5014{
5015        INIT_LIST_HEAD(&napi->poll_list);
5016        hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5017        napi->timer.function = napi_watchdog;
5018        napi->gro_count = 0;
5019        napi->gro_list = NULL;
5020        napi->skb = NULL;
5021        napi->poll = poll;
5022        if (weight > NAPI_POLL_WEIGHT)
5023                pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5024                            weight, dev->name);
5025        napi->weight = weight;
5026        list_add(&napi->dev_list, &dev->napi_list);
5027        napi->dev = dev;
5028#ifdef CONFIG_NETPOLL
5029        spin_lock_init(&napi->poll_lock);
5030        napi->poll_owner = -1;
5031#endif
5032        set_bit(NAPI_STATE_SCHED, &napi->state);
5033        napi_hash_add(napi);
5034}
5035EXPORT_SYMBOL(netif_napi_add);
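
/* Registration sketch: how a driver's (hypothetical) ndo_open/ndo_stop pair
 * could wire a NAPI context to the my_napi_poll() sketch above and tear it
 * down again.  netif_napi_del() must run in process context.
 */
static int my_open(struct net_device *netdev)
{
        struct my_priv *priv = netdev_priv(netdev);     /* assumes priv-sized alloc */

        netif_napi_add(netdev, &priv->napi, my_napi_poll, NAPI_POLL_WEIGHT);
        napi_enable(&priv->napi);
        /* ... allocate rings, enable RX interrupts ... */
        return 0;
}

static int my_stop(struct net_device *netdev)
{
        struct my_priv *priv = netdev_priv(netdev);

        /* ... disable RX interrupts ... */
        napi_disable(&priv->napi);      /* waits out any in-flight poll */
        netif_napi_del(&priv->napi);
        return 0;
}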
5036
5037void napi_disable(struct napi_struct *n)
5038{
5039        might_sleep();
5040        set_bit(NAPI_STATE_DISABLE, &n->state);
5041
5042        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5043                msleep(1);
5044        while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5045                msleep(1);
5046
5047        hrtimer_cancel(&n->timer);
5048
5049        clear_bit(NAPI_STATE_DISABLE, &n->state);
5050}
5051EXPORT_SYMBOL(napi_disable);
5052
5053/* Must be called in process context */
5054void netif_napi_del(struct napi_struct *napi)
5055{
5056        might_sleep();
5057        if (napi_hash_del(napi))
5058                synchronize_net();
5059        list_del_init(&napi->dev_list);
5060        napi_free_frags(napi);
5061
5062        kfree_skb_list(napi->gro_list);
5063        napi->gro_list = NULL;
5064        napi->gro_count = 0;
5065}
5066EXPORT_SYMBOL(netif_napi_del);
5067
5068static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5069{
5070        void *have;
5071        int work, weight;
5072
5073        list_del_init(&n->poll_list);
5074
5075        have = netpoll_poll_lock(n);
5076
5077        weight = n->weight;
5078
5079        /* This NAPI_STATE_SCHED test is for avoiding a race
5080         * with netpoll's poll_napi().  Only the entity which
5081         * obtains the lock and sees NAPI_STATE_SCHED set will
5082         * actually make the ->poll() call.  Therefore we avoid
5083         * accidentally calling ->poll() when NAPI is not scheduled.
5084         */
5085        work = 0;
5086        if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5087                work = n->poll(n, weight);
5088                trace_napi_poll(n);
5089        }
5090
5091        WARN_ON_ONCE(work > weight);
5092
5093        if (likely(work < weight))
5094                goto out_unlock;
5095
5096        /* Drivers must not modify the NAPI state if they
5097         * consume the entire weight.  In such cases this code
5098         * still "owns" the NAPI instance and therefore can
5099         * move the instance around on the list at-will.
5100         */
5101        if (unlikely(napi_disable_pending(n))) {
5102                napi_complete(n);
5103                goto out_unlock;
5104        }
5105
5106        if (n->gro_list) {
5107                /* Flush packets that are too old.
5108                 * If HZ < 1000, flush all packets.
5109                 */
5110                napi_gro_flush(n, HZ >= 1000);
5111        }
5112
5113        /* Some drivers may have called napi_schedule
5114         * prior to exhausting their budget.
5115         */
5116        if (unlikely(!list_empty(&n->poll_list))) {
5117                pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5118                             n->dev ? n->dev->name : "backlog");
5119                goto out_unlock;
5120        }
5121
5122        list_add_tail(&n->poll_list, repoll);
5123
5124out_unlock:
5125        netpoll_poll_unlock(have);
5126
5127        return work;
5128}
5129
5130static void net_rx_action(struct softirq_action *h)
5131{
5132        struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5133        unsigned long time_limit = jiffies + 2;
5134        int budget = netdev_budget;
5135        LIST_HEAD(list);
5136        LIST_HEAD(repoll);
5137
5138        local_irq_disable();
5139        list_splice_init(&sd->poll_list, &list);
5140        local_irq_enable();
5141
5142        for (;;) {
5143                struct napi_struct *n;
5144
5145                if (list_empty(&list)) {
5146                        if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
5147                                return;
5148                        break;
5149                }
5150
5151                n = list_first_entry(&list, struct napi_struct, poll_list);
5152                budget -= napi_poll(n, &repoll);
5153
5154                /* If the softirq window is exhausted then punt.
5155                 * We allow this to run for up to 2 jiffies, which gives
5156                 * an average latency of 1.5/HZ.
5157                 */
5158                if (unlikely(budget <= 0 ||
5159                             time_after_eq(jiffies, time_limit))) {
5160                        sd->time_squeeze++;
5161                        break;
5162                }
5163        }
5164
5165        __kfree_skb_flush();
5166        local_irq_disable();
5167
5168        list_splice_tail_init(&sd->poll_list, &list);
5169        list_splice_tail(&repoll, &list);
5170        list_splice(&list, &sd->poll_list);
5171        if (!list_empty(&sd->poll_list))
5172                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5173
5174        net_rps_action_and_irq_enable(sd);
5175}
5176
5177struct netdev_adjacent {
5178        struct net_device *dev;
5179
5180        /* upper master flag; there can only be one master device per list */
5181        bool master;
5182
5183        /* counter for the number of times this device was added to us */
5184        u16 ref_nr;
5185
5186        /* private field for the users */
5187        void *private;
5188
5189        struct list_head list;
5190        struct rcu_head rcu;
5191};
5192
5193static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
5194                                                 struct list_head *adj_list)
5195{
5196        struct netdev_adjacent *adj;
5197
5198        list_for_each_entry(adj, adj_list, list) {
5199                if (adj->dev == adj_dev)
5200                        return adj;
5201        }
5202        return NULL;
5203}
5204
5205/**
5206 * netdev_has_upper_dev - Check if device is linked to an upper device
5207 * @dev: device
5208 * @upper_dev: upper device to check
5209 *
5210 * Find out if a device is linked to the specified upper device and return
5211 * true if it is. Note that this checks only the immediate upper device,
5212 * not the complete stack of devices. The caller must hold the RTNL lock.
5213 */
5214bool netdev_has_upper_dev(struct net_device *dev,
5215                          struct net_device *upper_dev)
5216{
5217        ASSERT_RTNL();
5218
5219        return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
5220}
5221EXPORT_SYMBOL(netdev_has_upper_dev);
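
/* Calling-convention sketch with a hypothetical wrapper: the check is only
 * valid with RTNL held; many callers already hold it, but here it is taken
 * explicitly for illustration.
 */
static bool my_is_linked_to(struct net_device *dev,
                            struct net_device *candidate_upper)
{
        bool linked;

        rtnl_lock();
        linked = netdev_has_upper_dev(dev, candidate_upper);
        rtnl_unlock();

        return linked;
}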
5222
5223/**
5224 * netdev_has_any_upper_dev - Check if device is linked to some device
5225 * @dev: device
5226 *
5227 * Find out if a device is linked to an upper device and return true if
5228 * it is. The caller must hold the RTNL lock.
5229 */
5230static bool netdev_has_any_upper_dev(struct net_device *dev)
5231{
5232        ASSERT_RTNL();
5233
5234        return !list_empty(&dev->all_adj_list.upper);
5235}
5236
5237/**
5238 * netdev_master_upper_dev_get - Get master upper device
5239 * @dev: device
5240 *
5241 * Find a master upper device and return a pointer to it, or NULL if there
5242 * is none. The caller must hold the RTNL lock.
5243 */
5244struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5245{
5246        struct netdev_adjacent *upper;
5247
5248        ASSERT_RTNL();
5249
5250        if (list_empty(&dev->adj_list.upper))
5251                return NULL;
5252
5253        upper = list_first_entry(&dev->adj_list.upper,
5254                                 struct netdev_adjacent, list);
5255        if (likely(upper->master))
5256                return upper->dev;
5257        return NULL;
5258}
5259EXPORT_SYMBOL(netdev_master_upper_dev_get);
5260
5261void *netdev_adjacent_get_private(struct list_head *adj_list)
5262{
5263        struct netdev_adjacent *adj;
5264
5265        adj = list_entry(adj_list, struct netdev_adjacent, list);
5266
5267        return adj->private;
5268}
5269EXPORT_SYMBOL(netdev_adjacent_get_private);
5270
5271/**
5272 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5273 * @dev: device
5274 * @iter: list_head ** of the current position
5275 *
5276 * Gets the next device from the dev's upper list, starting from iter
5277 * position. The caller must hold RCU read lock.
5278 */
5279struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5280                                                 struct list_head **iter)
5281{
5282        struct netdev_adjacent *upper;
5283
5284        WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5285
5286        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5287
5288        if (&upper->list == &dev->adj_list.upper)
5289                return NULL;
5290
5291        *iter = &upper->list;
5292
5293        return upper->dev;
5294}
5295EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
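
/* Iteration sketch grounded in the helper above: @iter starts at the list
 * head and is advanced by each call, so a NULL return ends the walk.  The
 * wrapper function itself is hypothetical.
 */
static void my_walk_uppers(struct net_device *dev)
{
        struct list_head *iter = &dev->adj_list.upper;
        struct net_device *upper;

        rcu_read_lock();
        while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
                pr_debug("%s is an upper device of %s\n",
                         upper->name, dev->name);
        rcu_read_unlock();
}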
5296
5297/**
5298 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
5299 * @dev: device
5300 * @iter: list_head ** of the current position
5301 *
5302 * Gets the next device from the dev's upper list, starting from iter
5303 * position. The caller must hold RCU read lock.
5304 */
5305struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5306                                                     struct list_head **iter)
5307{
5308        struct netdev_adjacent *upper;
5309
5310        WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5311
5312        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5313
5314        if (&upper->list == &dev->all_adj_list.upper)
5315                return NULL;
5316
5317        *iter = &upper->list;
5318
5319        return upper->dev;
5320}
5321EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
5322
5323/**
5324 * netdev_lower_get_next_private - Get the next ->private from the
5325 *                                 lower neighbour list
5326 * @dev: device
5327 * @iter: list_head ** of the current position
5328 *
5329 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5330 * list, starting from iter position. The caller must either hold the
5331 * RTNL lock or use its own locking that guarantees that the neighbour lower
5332 * list will remain unchanged.
5333 */
5334void *netdev_lower_get_next_private(struct net_device *dev,
5335                                    struct list_head **iter)
5336{
5337        struct netdev_adjacent *lower;
5338
5339        lower = list_entry(*iter, struct netdev_adjacent, list);
5340
5341        if (&lower->list == &dev->adj_list.lower)
5342                return NULL;
5343
5344        *iter = lower->list.next;
5345
5346        return lower->private;
5347}
5348EXPORT_SYMBOL(netdev_lower_get_next_private);
5349
5350/**
5351 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5352 *                                     lower neighbour list, RCU
5353 *                                     variant
5354 * @dev: device
5355 * @iter: list_head ** of the current position
5356 *
5357 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5358 * list, starting from iter position. The caller must hold RCU read lock.
5359 */
5360void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5361                                        struct list_head **iter)
5362{
5363        struct netdev_adjacent *lower;
5364
5365        WARN_ON_ONCE(!rcu_read_lock_held());
5366
5367        lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5368
5369        if (&lower->list == &dev->adj_list.lower)
5370                return NULL;
5371
5372        *iter = &lower->list;
5373
5374        return lower->private;
5375}
5376EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5377
5378/**
5379 * netdev_lower_get_next - Get the next device from the lower neighbour
5380 *                         list
5381 * @dev: device
5382 * @iter: list_head ** of the current position
5383 *
5384 * Gets the next netdev_adjacent from the dev's lower neighbour
5385 * list, starting from iter position. The caller must hold the RTNL lock or
5386 * its own locking that guarantees that the neighbour lower
5387 * list will remain unchanged.
5388 */
5389void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5390{
5391        struct netdev_adjacent *lower;
5392
5393        lower = list_entry(*iter, struct netdev_adjacent, list);
5394
5395        if (&lower->list == &dev->adj_list.lower)
5396                return NULL;
5397
5398        *iter = lower->list.next;
5399
5400        return lower->dev;
5401}
5402EXPORT_SYMBOL(netdev_lower_get_next);
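
/* Matching sketch for the lower list (hypothetical wrapper): note the
 * different starting point, because netdev_lower_get_next() looks at *iter
 * itself rather than (*iter)->next.
 */
static void my_walk_lowers(struct net_device *dev)
{
        struct list_head *iter = dev->adj_list.lower.next;
        struct net_device *lower;

        ASSERT_RTNL();
        while ((lower = netdev_lower_get_next(dev, &iter)) != NULL)
                pr_debug("%s is a lower device of %s\n",
                         lower->name, dev->name);
}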
5403
5404/**
5405 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5406 *                                     lower neighbour list, RCU
5407 *                                     variant
5408 * @dev: device
5409 *
5410 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5411 * list. The caller must hold RCU read lock.
5412 */
5413void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5414{
5415        struct netdev_adjacent *lower;
5416
5417        lower = list_first_or_null_rcu(&dev->adj_list.lower,
5418                        struct netdev_adjacent, list);
5419        if (lower)
5420                return lower->private;
5421        return NULL;
5422}
5423EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5424
5425/**
5426 * netdev_master_upper_dev_get_rcu - Get master upper device
5427 * @dev: device
5428 *
5429 * Find a master upper device and return a pointer to it, or NULL if there
5430 * is none. The caller must hold the RCU read lock.
5431 */
5432struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5433{
5434        struct netdev_adjacent *upper;
5435
5436        upper = list_first_or_null_rcu(&dev->adj_list.upper,
5437                                       struct netdev_adjacent, list);
5438        if (upper && likely(upper->master))
5439                return upper->dev;
5440        return NULL;
5441}
5442EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
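
/* RCU-side sketch (hypothetical helper): fast-path code may look up the
 * master without RTNL, provided it stays inside the RCU read-side section
 * and does not use the pointer after rcu_read_unlock() without taking its
 * own reference.
 */
static bool my_has_master(struct net_device *dev)
{
        bool ret;

        rcu_read_lock();
        ret = netdev_master_upper_dev_get_rcu(dev) != NULL;
        rcu_read_unlock();

        return ret;
}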
5443
5444static int netdev_adjacent_sysfs_add(struct net_device *dev,
5445                              struct net_device *adj_dev,
5446                              struct list_head *dev_list)
5447{
5448        char linkname[IFNAMSIZ+7];
5449        sprintf(linkname, dev_list == &dev->adj_list.upper ?
5450                "upper_%s" : "lower_%s", adj_dev->name);
5451        return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5452                                 linkname);
5453}
5454static void netdev_adjacent_sysfs_del(struct net_device *dev,
5455                               char *name,
5456                               struct list_head *dev_list)
5457{
5458        char linkname[IFNAMSIZ+7];
5459        sprintf(linkname, dev_list == &dev->adj_list.upper ?
5460                "upper_%s" : "lower_%s", name);
5461        sysfs_remove_link(&(dev->dev.kobj), linkname);
5462}
5463
5464static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5465                                                 struct net_device *adj_dev,
5466                                                 struct list_head *dev_list)
5467{
5468        return (dev_list == &dev->adj_list.upper ||
5469                dev_list == &dev->adj_list.lower) &&
5470                net_eq(dev_net(dev), dev_net(adj_dev));
5471}
5472
5473static int __netdev_adjacent_dev_insert(struct net_device *dev,
5474                                        struct net_device *adj_dev,
5475                                        struct list_head *dev_list,
5476                                        void *private, bool master)
5477{
5478        struct netdev_adjacent *adj;
5479        int ret;
5480
5481        adj = __netdev_find_adj(adj_dev, dev_list);
5482
5483        if (adj) {
5484                adj->ref_nr++;
5485                return 0;
5486        }
5487
5488        adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5489        if (!adj)
5490                return -ENOMEM;
5491
5492        adj->dev = adj_dev;
5493        adj->master = master;
5494        adj->ref_nr = 1;
5495        adj->private = private;
5496        dev_hold(adj_dev);
5497
5498        pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5499                 adj_dev->name, dev->name, adj_dev->name);
5500
5501        if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5502                ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5503                if (ret)
5504                        goto free_adj;
5505        }
5506
5507        /* Ensure that master link is always the first item in list. */
5508        if (master) {
5509                ret = sysfs_create_link(&(dev->dev.kobj),
5510                                        &(adj_dev->dev.kobj), "master");
5511                if (ret)
5512                        goto remove_symlinks;
5513
5514                list_add_rcu(&adj->list, dev_list);
5515        } else {
5516                list_add_tail_rcu(&adj->list, dev_list);
5517        }
5518
5519        return 0;
5520
5521remove_symlinks:
5522        if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5523                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5524free_adj:
5525        kfree(adj);
5526        dev_put(adj_dev);
5527
5528        return ret;
5529}
5530
5531static void __netdev_adjacent_dev_remove(struct net_device *dev,
5532                                         struct net_device *adj_dev,
5533                                         struct list_head *dev_list)
5534{
5535        struct netdev_adjacent *adj;
5536
5537        adj = __netdev_find_adj(adj_dev, dev_list);
5538
5539        if (!adj) {
5540                pr_err("tried to remove device %s from %s\n",
5541                       dev->name, adj_dev->name);
5542                BUG();
5543        }
5544
5545        if (adj->ref_nr > 1) {
5546                pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5547                         adj->ref_nr-1);
5548                adj->ref_nr--;
5549                return;
5550        }
5551
5552        if (adj->master)
5553                sysfs_remove_link(&(dev->dev.kobj), "master");
5554
5555        if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5556                netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5557
5558        list_del_rcu(&adj->list);
5559        pr_debug("dev_put for %s, because link removed from %s to %s\n",
5560                 adj_dev->name, dev->name, adj_dev->name);
5561        dev_put(adj_dev);
5562        kfree_rcu(adj, rcu);
5563}
5564
5565static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5566                                            struct net_device *upper_dev,
5567                                            struct list_head *up_list,
5568                                            struct list_head *down_list,
5569                                            void *private, bool master)
5570{
5571        int ret;
5572
5573        ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5574                                           master);
5575        if (ret)
5576                return ret;
5577
5578        ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5579                                           false);
5580        if (ret) {
5581                __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5582                return ret;
5583        }
5584
5585        return 0;
5586}
5587
5588static int __netdev_adjacent_dev_link(struct net_device *dev,
5589                                      struct net_device *upper_dev)
5590{
5591        return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5592                                                &dev->all_adj_list.upper,
5593                                                &upper_dev->all_adj_list.lower,
5594                                                NULL, false);
5595}
5596
5597static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5598                                               struct net_device *upper_dev,
5599                                               struct list_head *up_list,
5600                                               struct list_head *down_list)
5601{
5602        __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5603        __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5604}
5605
5606static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5607                                         struct net_device *upper_dev)
5608{
5609        __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5610                                           &dev->all_adj_list.upper,
5611                                           &upper_dev->all_adj_list.lower);
5612}
5613
5614static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5615                                                struct net_device *upper_dev,
5616                                                void *private, bool master)
5617{
5618        int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5619
5620        if (ret)
5621                return ret;
5622
5623        ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5624                                               &dev->adj_list.upper,
5625                                               &upper_dev->adj_list.lower,
5626                                               private, master);
5627        if (ret) {
5628                __netdev_adjacent_dev_unlink(dev, upper_dev);
5629                return ret;
5630        }
5631
5632        return 0;
5633}
5634
5635static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5636                                                   struct net_device *upper_dev)
5637{
5638        __netdev_adjacent_dev_unlink(dev, upper_dev);
5639        __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5640                                           &dev->adj_list.upper,
5641                                           &upper_dev->adj_list.lower);
5642}
5643
5644static int __netdev_upper_dev_link(struct net_device *dev,
5645                                   struct net_device *upper_dev, bool master,
5646                                   void *upper_priv, void *upper_info)
5647{
5648        struct netdev_notifier_changeupper_info changeupper_info;
5649        struct netdev_adjacent *i, *j, *to_i, *to_j;
5650        int ret = 0;
5651
5652        ASSERT_RTNL();
5653
5654        if (dev == upper_dev)
5655                return -EBUSY;
5656
5657        /* To prevent loops, check that dev is not an upper device of upper_dev. */
5658        if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
5659                return -EBUSY;
5660
5661        if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
5662                return -EEXIST;
5663
5664        if (master && netdev_master_upper_dev_get(dev))
5665                return -EBUSY;
5666
5667        changeupper_info.upper_dev = upper_dev;
5668        changeupper_info.master = master;
5669        changeupper_info.linking = true;
5670        changeupper_info.upper_info = upper_info;
5671
5672        ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5673                                            &changeupper_info.info);
5674        ret = notifier_to_errno(ret);
5675        if (ret)
5676                return ret;
5677
5678        ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
5679                                                   master);
5680        if (ret)