linux/include/linux/netdevice.h
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the Interfaces handler.
 *
 * Version:     @(#)dev.h       1.0.10  08/12/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Bjorn Ekwall. <bj0rn@blox.se>
 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>

struct netpoll_info;
struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
                                        /* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )

/* hardware address assignment types */
#define NET_ADDR_PERM           0       /* address is permanent (default) */
#define NET_ADDR_RANDOM         1       /* address is generated randomly */
#define NET_ADDR_STOLEN         2       /* address is stolen from other device */

/* Backlog congestion levels */
#define NET_RX_SUCCESS          0       /* keep 'em coming, baby */
#define NET_RX_DROP             1       /* packet dropped */
/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS        0x00
#define NET_XMIT_DROP           0x01    /* skb dropped                  */
#define NET_XMIT_CN             0x02    /* congestion notification      */
#define NET_XMIT_POLICED        0x03    /* skb is shot by police        */
#define NET_XMIT_MASK           0x0f    /* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)        ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)

/* Driver transmit return codes */
#define NETDEV_TX_MASK          0xf0

enum netdev_tx {
        __NETDEV_TX_MIN  = INT_MIN,     /* make sure enum is signed */
        NETDEV_TX_OK     = 0x00,        /* driver took care of packet */
        NETDEV_TX_BUSY   = 0x10,        /* driver tx path was busy */
        NETDEV_TX_LOCKED = 0x20,        /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
        /*
         * Positive cases with an skb consumed by a driver:
         * - successful transmission (rc == NETDEV_TX_OK)
         * - error while transmitting (rc < 0)
         * - error while queueing to a different device (rc & NET_XMIT_MASK)
         */
        if (likely(rc < NET_XMIT_MASK))
                return true;

        return false;
}
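
/*
 * Illustrative sketch (not part of this header) of how a driver's
 * hard_start_xmit() is expected to use these codes.  The foo_* names
 * are hypothetical:
 *
 *      static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *                                        struct net_device *dev)
 *      {
 *              struct foo_priv *priv = netdev_priv(dev);
 *
 *              if (foo_tx_ring_full(priv))
 *                      return NETDEV_TX_BUSY;  // skb NOT consumed, retried
 *
 *              foo_hw_queue_skb(priv, skb);
 *              return NETDEV_TX_OK;            // skb consumed
 *      }
 *
 * Note that dev_xmit_complete(NETDEV_TX_OK) is true while
 * dev_xmit_complete(NETDEV_TX_BUSY) is false, matching the comment above.
 */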

/*
 *      Compute the worst case header length according to the protocols
 *      used.
 */

#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif IS_ENABLED(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
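
/*
 * Worked example (illustrative): with CONFIG_WLAN and CONFIG_MAC80211_MESH
 * enabled, LL_MAX_HEADER is 128; if one of the tunnels above is also
 * enabled, MAX_HEADER becomes 128 + 48 = 176 to leave room for the extra
 * encapsulation header.
 */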

/*
 *      Old network device statistics. Fields are native words
 *      (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
        unsigned long   rx_packets;
        unsigned long   tx_packets;
        unsigned long   rx_bytes;
        unsigned long   tx_bytes;
        unsigned long   rx_errors;
        unsigned long   tx_errors;
        unsigned long   rx_dropped;
        unsigned long   tx_dropped;
        unsigned long   multicast;
        unsigned long   collisions;
        unsigned long   rx_length_errors;
        unsigned long   rx_over_errors;
        unsigned long   rx_crc_errors;
        unsigned long   rx_frame_errors;
        unsigned long   rx_fifo_errors;
        unsigned long   rx_missed_errors;
        unsigned long   tx_aborted_errors;
        unsigned long   tx_carrier_errors;
        unsigned long   tx_fifo_errors;
        unsigned long   tx_heartbeat_errors;
        unsigned long   tx_window_errors;
        unsigned long   rx_compressed;
        unsigned long   tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key rps_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
        struct list_head        list;
        unsigned char           addr[MAX_ADDR_LEN];
        unsigned char           type;
#define NETDEV_HW_ADDR_T_LAN            1
#define NETDEV_HW_ADDR_T_SAN            2
#define NETDEV_HW_ADDR_T_SLAVE          3
#define NETDEV_HW_ADDR_T_UNICAST        4
#define NETDEV_HW_ADDR_T_MULTICAST      5
        bool                    synced;
        bool                    global_use;
        int                     refcount;
        struct rcu_head         rcu_head;
};

struct netdev_hw_addr_list {
        struct list_head        list;
        int                     count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
        list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
        netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
        netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
        u16             hh_len;
        u16             __pad;
        seqlock_t       hh_lock;

        /* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD     16
#define HH_DATA_OFF(__len) \
        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
        (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
        unsigned long   hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
        ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
        ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
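
/*
 * Worked example (illustrative): for plain Ethernet, hard_header_len is 14
 * and needed_headroom is typically 0, so LL_RESERVED_SPACE() yields
 * (14 & ~15) + 16 = 16 bytes of headroom.  A sender would use it like:
 *
 *      skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *      if (skb)
 *              skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */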

struct header_ops {
        int     (*create) (struct sk_buff *skb, struct net_device *dev,
                           unsigned short type, const void *daddr,
                           const void *saddr, unsigned int len);
        int     (*parse)(const struct sk_buff *skb, unsigned char *haddr);
        int     (*rebuild)(struct sk_buff *skb);
        int     (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
        void    (*cache_update)(struct hh_cache *hh,
                                const struct net_device *dev,
                                const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
        __LINK_STATE_START,
        __LINK_STATE_PRESENT,
        __LINK_STATE_NOCARRIER,
        __LINK_STATE_LINKWATCH_PENDING,
        __LINK_STATE_DORMANT,
};


/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
        char name[IFNAMSIZ];
        struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
        /* The poll_list must only be managed by the entity which
         * changes the state of the NAPI_STATE_SCHED bit.  This means
         * whoever atomically sets that bit can add this napi_struct
         * to the per-cpu poll_list, and whoever clears that bit
         * can remove from the list right before clearing the bit.
         */
        struct list_head        poll_list;

        unsigned long           state;
        int                     weight;
        unsigned int            gro_count;
        int                     (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
        spinlock_t              poll_lock;
        int                     poll_owner;
#endif
        struct net_device       *dev;
        struct sk_buff          *gro_list;
        struct sk_buff          *skb;
        struct list_head        dev_list;
};

enum {
        NAPI_STATE_SCHED,       /* Poll is scheduled */
        NAPI_STATE_DISABLE,     /* Disable pending */
        NAPI_STATE_NPSVC,       /* Netpoll - don't dequeue from poll_list */
};

enum gro_result {
        GRO_MERGED,
        GRO_MERGED_FREE,
        GRO_HELD,
        GRO_NORMAL,
        GRO_DROP,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers that the skb should be ignored, it should
 * return RX_HANDLER_EXACT. The skb will only be delivered to protocol
 * handlers that are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
        RX_HANDLER_CONSUMED,
        RX_HANDLER_ANOTHER,
        RX_HANDLER_EXACT,
        RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
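
/*
 * Illustrative sketch of an rx_handler (the foo_* names are hypothetical;
 * real users of this hook include bridging and bonding).  It diverts
 * frames to an upper device when one is attached:
 *
 *      static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *      {
 *              struct sk_buff *skb = *pskb;
 *              struct net_device *upper = foo_get_upper_dev(skb->dev);
 *
 *              if (!upper)
 *                      return RX_HANDLER_PASS;
 *
 *              skb->dev = upper;
 *              return RX_HANDLER_ANOTHER;      // reprocess on the new device
 *      }
 *
 * registered with netdev_rx_handler_register(dev, foo_handle_frame, NULL)
 * under rtnl.
 */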

extern void __napi_schedule(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
        return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_schedule_prep - check if napi can be scheduled
 *      @n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline bool napi_schedule_prep(struct napi_struct *n)
{
        return !napi_disable_pending(n) &&
                !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *      napi_schedule - schedule NAPI poll
 *      @n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
        if (napi_schedule_prep(n))
                __napi_schedule(n);
}
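
/*
 * Typical driver pattern (sketch; the foo_* names are hypothetical): the
 * interrupt handler masks RX interrupts and schedules NAPI, and the poll
 * routine re-enables interrupts only once it completes below budget:
 *
 *      static irqreturn_t foo_interrupt(int irq, void *data)
 *      {
 *              struct foo_priv *priv = data;
 *
 *              foo_disable_rx_irq(priv);
 *              napi_schedule(&priv->napi);
 *              return IRQ_HANDLED;
 *      }
 *
 *      static int foo_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *              int done = foo_process_rx(priv, budget);
 *
 *              if (done < budget) {
 *                      napi_complete(napi);
 *                      foo_enable_rx_irq(priv);
 *              }
 *              return done;
 *      }
 */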

/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline bool napi_reschedule(struct napi_struct *napi)
{
        if (napi_schedule_prep(napi)) {
                __napi_schedule(napi);
                return true;
        }
        return false;
}

/**
 *      napi_complete - NAPI processing complete
 *      @n: napi context
 *
 * Mark NAPI processing as complete.
 */
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);

/**
 *      napi_disable - prevent NAPI from scheduling
 *      @n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
        set_bit(NAPI_STATE_DISABLE, &n->state);
        while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
        clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *      napi_enable - enable NAPI scheduling
 *      @n: napi context
 *
 * Allow NAPI to be scheduled on this context again.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
        BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
        smp_mb__before_clear_bit();
        clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *      napi_synchronize - wait until NAPI is not running
 *      @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
        while (test_bit(NAPI_STATE_SCHED, &n->state))
                msleep(1);
}
#else
# define napi_synchronize(n)    barrier()
#endif

enum netdev_queue_state_t {
        __QUEUE_STATE_DRV_XOFF,
        __QUEUE_STATE_STACK_XOFF,
        __QUEUE_STATE_FROZEN,
#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF)             | \
                              (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF            | \
                                        (1 << __QUEUE_STATE_FROZEN))
};
/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue.  The
 * netif_tx_* functions below are used to manipulate this flag.  The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently.  The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or the stack (either
 * of the XOFF bits is set in the state).  Drivers should not need to call
 * the netif_xmit_*stopped functions; they should only be using netif_tx_*.
 */
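
/*
 * Sketch of the intended driver-side usage (foo_* names are hypothetical):
 * stop the queue from the xmit path when the hardware ring fills up, and
 * wake it from the TX completion path once there is room again.
 *
 *      // in foo_start_xmit():
 *      if (foo_tx_ring_full(priv))
 *              netif_stop_queue(dev);
 *
 *      // in the TX completion handler:
 *      if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
 *              netif_wake_queue(dev);
 */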

struct netdev_queue {
/*
 * read mostly part
 */
        struct net_device       *dev;
        struct Qdisc            *qdisc;
        struct Qdisc            *qdisc_sleeping;
#ifdef CONFIG_SYSFS
        struct kobject          kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
        int                     numa_node;
#endif
/*
 * write mostly part
 */
        spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
        int                     xmit_lock_owner;
        /*
         * please use this field instead of dev->trans_start
         */
        unsigned long           trans_start;

        /*
         * Number of TX timeouts for this queue
         * (/sys/class/net/DEV/Q/trans_timeout)
         */
        unsigned long           trans_timeout;

        unsigned long           state;

#ifdef CONFIG_BQL
        struct dql              dql;
#endif
} ____cacheline_aligned_in_smp;

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
        return q->numa_node;
#else
        return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
        q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length.  The
 * map is an array of CPUs.
 */
struct rps_map {
        unsigned int len;
        struct rcu_head rcu;
        u16 cpus[0];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
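
/*
 * RPS_MAP_SIZE() sizes the flexible cpus[] array at allocation time.
 * Sketch of allocating a map holding 'n' CPUs (this mirrors the pattern
 * used by the sysfs store path in net/core/net-sysfs.c):
 *
 *      map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(n), L1_CACHE_BYTES),
 *                    GFP_KERNEL);
 *      if (map)
 *              map->len = n;
 */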

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
        u16 cpu;
        u16 filter;
        unsigned int last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
        unsigned int mask;
        struct rcu_head rcu;
        struct work_struct free_work;
        struct rps_dev_flow flows[0];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
        unsigned int mask;
        u16 ents[0];
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))

#define RPS_NO_CPU 0xffff

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
                                        u32 hash)
{
        if (table && hash) {
                unsigned int cpu, index = hash & table->mask;

                /* We only give a hint, preemption can change cpu under us */
                cpu = raw_smp_processor_id();

                if (table->ents[index] != cpu)
                        table->ents[index] = cpu;
        }
}
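
/*
 * The recvmsg path records the flow-to-CPU mapping under RCU, roughly
 * like this (cf. sock_rps_record_flow() in net/sock.h):
 *
 *      rcu_read_lock();
 *      table = rcu_dereference(rps_sock_flow_table);
 *      rps_record_sock_flow(table, sk->sk_rxhash);
 *      rcu_read_unlock();
 */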

static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
                                       u32 hash)
{
        if (table && hash)
                table->ents[hash & table->mask] = RPS_NO_CPU;
}

extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

#ifdef CONFIG_RFS_ACCEL
extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
                                u32 flow_id, u16 filter_id);
#endif

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
        struct rps_map __rcu            *rps_map;
        struct rps_dev_flow_table __rcu *rps_flow_table;
        struct kobject                  kobj;
        struct net_device               *dev;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length.  The
 * map is an array of queues.
 */
struct xps_map {
        unsigned int len;
        unsigned int alloc_len;
        struct rcu_head rcu;
        u16 queues[0];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))    \
    / sizeof(u16))

/*
 * This structure holds all XPS maps for a device.  Maps are indexed by CPU.
 */
struct xps_dev_maps {
        struct rcu_head rcu;
        struct xps_map __rcu *cpu_map[0];
};
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +                \
    (nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE    16
#define TC_BITMASK      15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
        u16 count;
        u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure holds information about a device configured to run the
 * FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
        char    manufacturer[64];
        char    serial_number[64];
        char    hardware_version[64];
        char    driver_version[64];
        char    optionrom_version[64];
        char    firmware_version[64];
        char    model[256];
        char    model_description[256];
};
#endif

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when a network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *     This function is called when the device is unregistered or when
 *     registration fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when a network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when a network device transitions to the down
 *     state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *      Called when a packet needs to be transmitted.
 *      Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
 *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *      Required; cannot be NULL.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *      Called to decide which queue to use when the device supports
 *      multiple transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *      This function is called to allow the device receiver to make
 *      changes to configuration when multicast or promiscuous mode is
 *      enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *      This function is called when the device changes its address list
 *      filtering. If the driver handles unicast address filtering, it
 *      should set IFF_UNICAST_FLT in its priv_flags.
 *
 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
 *      This function is called when the Media Access Control address
 *      needs to be changed. If this interface is not defined, the
 *      MAC address cannot be changed.
 *
 * int (*ndo_validate_addr)(struct net_device *dev);
 *      Test if the Media Access Control address is valid for the device.
 *
 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
 *      Called when a user requests an ioctl which can't be handled by
 *      the generic interface code. If not defined, ioctls return a
 *      not-supported error code.
 *
 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
 *      Used to set the network device's bus interface parameters. This
 *      interface is retained for legacy reasons; new devices should use
 *      the bus interface (PCI) for low level management.
 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *      Called when a user wants to change the Maximum Transfer Unit
 *      of a device. If not defined, any request to change the MTU will
 *      return an error.
 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *      Callback used when the transmitter has not made any progress
 *      for dev->watchdog ticks.
 *
 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
 *                      struct rtnl_link_stats64 *storage);
 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
 *      Called when a user wants to get the network device usage
 *      statistics. Drivers must do one of the following:
 *      1. Define @ndo_get_stats64 to fill in a zero-initialised
 *         rtnl_link_stats64 structure passed by the caller.
 *      2. Define @ndo_get_stats to update a net_device_stats structure
 *         (which should normally be dev->stats) and return a pointer to
 *         it. The structure may be changed asynchronously only if each
 *         field is written atomically.
 *      3. Update dev->stats asynchronously and atomically, and define
 *         neither operation.
 *
 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 *      If the device supports VLAN filtering (dev->features &
 *      NETIF_F_HW_VLAN_FILTER), this function is called when a VLAN id is
 *      registered.
 *
 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 *      If the device supports VLAN filtering (dev->features &
 *      NETIF_F_HW_VLAN_FILTER), this function is called when a VLAN id is
 *      unregistered.
 *
 * void (*ndo_poll_controller)(struct net_device *dev);
 *
 *      SR-IOV management functions.
 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
 * int (*ndo_get_vf_config)(struct net_device *dev,
 *                          int vf, struct ifla_vf_info *ivf);
 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
 *                        struct nlattr *port[]);
 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
 *      Called to set up 'tc' number of traffic classes in the net device.
 *      This is always called from the stack with the rtnl lock held and
 *      netif tx queues stopped. This allows the netdevice to perform queue
 *      management safely.
 *
 *      Fibre Channel over Ethernet (FCoE) offload functions.
 * int (*ndo_fcoe_enable)(struct net_device *dev);
 *      Called when the FCoE protocol stack wants to start using the LLD for
 *      FCoE so the underlying device can perform whatever configuration or
 *      initialization is needed to support acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_disable)(struct net_device *dev);
 *      Called when the FCoE protocol stack wants to stop using the LLD for
 *      FCoE so the underlying device can perform whatever clean-ups are
 *      needed to stop supporting acceleration of FCoE traffic.
 *
 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
 *                           struct scatterlist *sgl, unsigned int sgc);
 *      Called when the FCoE Initiator wants to initialize an I/O that
 *      is a possible candidate for Direct Data Placement (DDP). The LLD can
 *      perform the necessary setup and return 1 to indicate the device is
 *      set up successfully to perform DDP on this I/O; otherwise it
 *      returns 0.
 *
 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
 *      Called when the FCoE Initiator/Target is done with the DDPed I/O as
 *      indicated by the FC exchange id 'xid', so the underlying device can
 *      clean up and reuse resources for later DDP requests.
 *
 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
 *                            struct scatterlist *sgl, unsigned int sgc);
 *      Called when the FCoE Target wants to initialize an I/O that
 *      is a possible candidate for Direct Data Placement (DDP). The LLD can
 *      perform the necessary setup and return 1 to indicate the device is
 *      set up successfully to perform DDP on this I/O; otherwise it
 *      returns 0.
 *
 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
 *                             struct netdev_fcoe_hbainfo *hbainfo);
 *      Called when the FCoE protocol stack wants information on the
 *      underlying device. This information is utilized by the FCoE protocol
 *      stack to register attributes with the Fibre Channel management
 *      service as per the FC-GS Fabric Device Management Information (FDMI)
 *      specification.
 *
 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
 *      Called when the underlying device wants to override the default World
 *      Wide Name (WWN) generation mechanism in the FCoE protocol stack to
 *      pass its own World Wide Port Name (WWPN) or World Wide Node Name
 *      (WWNN) to the FCoE protocol stack to use.
 *
 *      RFS acceleration.
 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
 *                          u16 rxq_index, u32 flow_id);
 *      Set hardware filter for RFS.  rxq_index is the target queue index;
 *      flow_id is a flow ID to be passed to rps_may_expire_flow() later.
 *      Return the filter ID on success, or a negative error code.
 *
 *      Slave management functions (for bridge, bonding, etc). The user
 *      should call netdev_set_master() to set dev->master properly.
 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
 *      Called to make another netdev an underling.
 *
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 *      Called to release a previously enslaved netdev.
 *
 *      Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 *              netdev_features_t features);
 *      Adjusts the requested feature flags according to device-specific
 *      constraints, and returns the resulting flags. Must not modify
 *      the device state.
 *
 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
 *      Called to update device configuration to new features. The passed
 *      feature set might be less than what was returned by
 *      ndo_fix_features(). Must return >0 or -errno if it changed
 *      dev->features itself.
 *
 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
 *                    struct net_device *dev,
 *                    const unsigned char *addr, u16 flags)
 *      Adds an FDB entry to dev for addr.
 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
 *                    const unsigned char *addr)
 *      Deletes the FDB entry from dev corresponding to addr.
 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
 *                     struct net_device *dev, int idx)
 *      Used to add FDB entries to dump requests. Implementers should add
 *      entries to skb and update idx with the number of entries.
 */
struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
        void                    (*ndo_uninit)(struct net_device *dev);
        int                     (*ndo_open)(struct net_device *dev);
        int                     (*ndo_stop)(struct net_device *dev);
        netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
                                                   struct net_device *dev);
        u16                     (*ndo_select_queue)(struct net_device *dev,
                                                    struct sk_buff *skb);
        void                    (*ndo_change_rx_flags)(struct net_device *dev,
                                                       int flags);
        void                    (*ndo_set_rx_mode)(struct net_device *dev);
        int                     (*ndo_set_mac_address)(struct net_device *dev,
                                                       void *addr);
        int                     (*ndo_validate_addr)(struct net_device *dev);
        int                     (*ndo_do_ioctl)(struct net_device *dev,
                                                struct ifreq *ifr, int cmd);
        int                     (*ndo_set_config)(struct net_device *dev,
                                                  struct ifmap *map);
        int                     (*ndo_change_mtu)(struct net_device *dev,
                                                  int new_mtu);
        int                     (*ndo_neigh_setup)(struct net_device *dev,
                                                   struct neigh_parms *);
        void                    (*ndo_tx_timeout) (struct net_device *dev);

        struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
                                                     struct rtnl_link_stats64 *storage);
        struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);

        int                     (*ndo_vlan_rx_add_vid)(struct net_device *dev,
                                                       unsigned short vid);
        int                     (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
                                                        unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
        int                     (*ndo_netpoll_setup)(struct net_device *dev,
                                                     struct netpoll_info *info,
                                                     gfp_t gfp);
        void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
        int                     (*ndo_set_vf_mac)(struct net_device *dev,
                                                  int queue, u8 *mac);
        int                     (*ndo_set_vf_vlan)(struct net_device *dev,
                                                   int queue, u16 vlan, u8 qos);
        int                     (*ndo_set_vf_tx_rate)(struct net_device *dev,
                                                      int vf, int rate);
        int                     (*ndo_set_vf_spoofchk)(struct net_device *dev,
                                                       int vf, bool setting);
        int                     (*ndo_get_vf_config)(struct net_device *dev,
                                                     int vf,
                                                     struct ifla_vf_info *ivf);
        int                     (*ndo_set_vf_port)(struct net_device *dev,
                                                   int vf,
                                                   struct nlattr *port[]);
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
        int                     (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if IS_ENABLED(CONFIG_FCOE)
        int                     (*ndo_fcoe_enable)(struct net_device *dev);
        int                     (*ndo_fcoe_disable)(struct net_device *dev);
        int                     (*ndo_fcoe_ddp_setup)(struct net_device *dev,
                                                      u16 xid,
                                                      struct scatterlist *sgl,
                                                      unsigned int sgc);
        int                     (*ndo_fcoe_ddp_done)(struct net_device *dev,
                                                     u16 xid);
        int                     (*ndo_fcoe_ddp_target)(struct net_device *dev,
                                                       u16 xid,
                                                       struct scatterlist *sgl,
                                                       unsigned int sgc);
        int                     (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
                                                        struct netdev_fcoe_hbainfo *hbainfo);
#endif

#if IS_ENABLED(CONFIG_LIBFCOE)
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
        int                     (*ndo_fcoe_get_wwn)(struct net_device *dev,
                                                    u64 *wwn, int type);
#endif

#ifdef CONFIG_RFS_ACCEL
        int                     (*ndo_rx_flow_steer)(struct net_device *dev,
                                                     const struct sk_buff *skb,
                                                     u16 rxq_index,
                                                     u32 flow_id);
#endif
        int                     (*ndo_add_slave)(struct net_device *dev,
                                                 struct net_device *slave_dev);
        int                     (*ndo_del_slave)(struct net_device *dev,
                                                 struct net_device *slave_dev);
        netdev_features_t       (*ndo_fix_features)(struct net_device *dev,
                                                    netdev_features_t features);
        int                     (*ndo_set_features)(struct net_device *dev,
                                                    netdev_features_t features);
        int                     (*ndo_neigh_construct)(struct neighbour *n);
        void                    (*ndo_neigh_destroy)(struct neighbour *n);

        int                     (*ndo_fdb_add)(struct ndmsg *ndm,
                                               struct nlattr *tb[],
                                               struct net_device *dev,
                                               const unsigned char *addr,
                                               u16 flags);
        int                     (*ndo_fdb_del)(struct ndmsg *ndm,
                                               struct net_device *dev,
                                               const unsigned char *addr);
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
                                                struct netlink_callback *cb,
                                                struct net_device *dev,
                                                int idx);
};
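
/*
 * A minimal, illustrative ops table for a hypothetical Ethernet driver
 * (only ndo_start_xmit is mandatory; the foo_* functions are assumed,
 * while the eth_* helpers come from <linux/etherdevice.h>):
 *
 *      static const struct net_device_ops foo_netdev_ops = {
 *              .ndo_open               = foo_open,
 *              .ndo_stop               = foo_stop,
 *              .ndo_start_xmit         = foo_start_xmit,
 *              .ndo_set_mac_address    = eth_mac_addr,
 *              .ndo_validate_addr      = eth_validate_addr,
 *              .ndo_change_mtu         = eth_change_mtu,
 *      };
 *
 * assigned as dev->netdev_ops = &foo_netdev_ops before register_netdev().
 */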

/*
 *      The DEVICE structure.
 *      Actually, this whole structure is a big mistake.  It mixes I/O
 *      data with strictly "high-level" data, and it has to know about
 *      almost every data structure used in the INET module.
 *
 *      FIXME: cleanup struct net_device such that network protocol info
 *      moves out.
 */

struct net_device {

        /*
         * This is the first field of the "visible" part of this structure
         * (i.e. as seen by users in the "Space.c" file).  It is the name
         * of the interface.
         */
        char                    name[IFNAMSIZ];

        /* device name hash chain, please keep it close to name[] */
        struct hlist_node       name_hlist;

        /* snmp alias */
        char                    *ifalias;

        /*
         *      I/O specific fields
         *      FIXME: Merge these and struct ifmap into one
         */
        unsigned long           mem_end;        /* shared mem end       */
        unsigned long           mem_start;      /* shared mem start     */
        unsigned long           base_addr;      /* device I/O address   */
        unsigned int            irq;            /* device IRQ number    */

        /*
         *      Some hardware also needs these fields, but they are not
         *      part of the usual set specified in Space.c.
         */

        unsigned long           state;

        struct list_head        dev_list;
        struct list_head        napi_list;
        struct list_head        unreg_list;

        /* currently active device features */
        netdev_features_t       features;
        /* user-changeable features */
        netdev_features_t       hw_features;
        /* user-requested features */
        netdev_features_t       wanted_features;
        /* mask of features inheritable by VLAN devices */
        netdev_features_t       vlan_features;

        /* Interface index. Unique device identifier    */
        int                     ifindex;
        int                     iflink;

        struct net_device_stats stats;
        atomic_long_t           rx_dropped; /* packets dropped by the core
                                             * network stack.
                                             * Do not use this in drivers.
                                             */

#ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
         * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *   wireless_handlers;
        /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data * wireless_data;
#endif
        /* Management operations */
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;

        /* Hardware header description */
        const struct header_ops *header_ops;

        unsigned int            flags;  /* interface flags (a la BSD)   */
        unsigned int            priv_flags; /* Like 'flags' but invisible to userspace.
                                             * See if.h for definitions. */
        unsigned short          gflags;
        unsigned short          padded; /* How much padding added by alloc_netdev() */

        unsigned char           operstate; /* RFC2863 operstate */
        unsigned char           link_mode; /* mapping policy to operstate */

        unsigned char           if_port;        /* Selectable AUI, TP,..*/
        unsigned char           dma;            /* DMA channel          */

        unsigned int            mtu;    /* interface MTU value          */
        unsigned short          type;   /* interface hardware type      */
        unsigned short          hard_header_len;        /* hardware hdr length  */

        /* extra head- and tailroom the hardware may need, but not in all cases
         * can this be guaranteed, especially tailroom. Some cases also use
         * LL_MAX_HEADER instead to allocate the skb.
         */
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;

        /* Interface address info. */
        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
        unsigned char           addr_assign_type; /* hw address assignment type */
        unsigned char           addr_len;       /* hardware address length      */
        unsigned char           neigh_priv_len;
        unsigned short          dev_id;         /* for shared network cards */

        spinlock_t              addr_list_lock;
        struct netdev_hw_addr_list      uc;     /* Unicast mac addresses */
        struct netdev_hw_addr_list      mc;     /* Multicast mac addresses */
        bool                    uc_promisc;
        unsigned int            promiscuity;
        unsigned int            allmulti;


        /* Protocol specific pointers */

#if IS_ENABLED(CONFIG_VLAN_8021Q)
        struct vlan_info __rcu  *vlan_info;     /* VLAN info */
#endif
#if IS_ENABLED(CONFIG_NET_DSA)
        struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
#endif
        void                    *atalk_ptr;     /* AppleTalk link       */
        struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
        struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
        struct inet6_dev __rcu  *ip6_ptr;       /* IPv6 specific data */
        void                    *ax25_ptr;      /* AX.25 specific data */
        struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
                                                   assign before registering */

/*
 * Cache lines mostly used on receive path (including eth_type_trans())
 */
        unsigned long           last_rx;        /* Time of last Rx
                                                 * This should not be set in
                                                 * drivers, unless really needed,
                                                 * because the network stack
                                                 * (bonding) uses it if/when
                                                 * necessary, to avoid dirtying
                                                 * this cache line.
                                                 */

        struct net_device       *master; /* Pointer to master device of a group,
                                          * which this device is member of.
                                          */

        /* Interface address info used in eth_type_trans() */
        unsigned char           *dev_addr;      /* hw address, (before bcast
                                                   because most packets are
                                                   unicast) */

        struct netdev_hw_addr_list      dev_addrs; /* list of device
                                                      hw addresses */

        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */

#ifdef CONFIG_SYSFS
        struct kset             *queues_kset;
#endif

#ifdef CONFIG_RPS
        struct netdev_rx_queue  *_rx;

        /* Number of RX queues allocated at register_netdev() time */
        unsigned int            num_rx_queues;

        /* Number of RX queues currently active in device */
        unsigned int            real_num_rx_queues;

#ifdef CONFIG_RFS_ACCEL
        /* CPU reverse-mapping for RX completion interrupts, indexed
         * by RX queue number.  Assigned by driver.  This must only be
         * set if the ndo_rx_flow_steer operation is defined. */
        struct cpu_rmap         *rx_cpu_rmap;
#endif
#endif

        rx_handler_func_t __rcu *rx_handler;
        void __rcu              *rx_handler_data;

        struct netdev_queue __rcu *ingress_queue;

/*
 * Cache lines mostly used on transmit path
 */
        struct netdev_queue     *_tx ____cacheline_aligned_in_smp;

        /* Number of TX queues allocated at alloc_netdev_mq() time  */
        unsigned int            num_tx_queues;

        /* Number of TX queues currently active in device  */
        unsigned int            real_num_tx_queues;

        /* root qdisc from userspace point of view */
        struct Qdisc            *qdisc;

        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
        spinlock_t              tx_global_lock;

#ifdef CONFIG_XPS
        struct xps_dev_maps __rcu *xps_maps;
#endif

        /* These may be needed for future network-power-down code. */

        /*
         * trans_start here is expensive for high speed devices on SMP,
         * please use netdev_queue->trans_start instead.
         */
        unsigned long           trans_start;    /* Time (in jiffies) of last Tx */

        int                     watchdog_timeo; /* used by dev_watchdog() */
        struct timer_list       watchdog_timer;

        /* Number of references to this device */
        int __percpu            *pcpu_refcnt;

        /* delayed register/unregister */
        struct list_head        todo_list;
        /* device index hash chain */
        struct hlist_node       index_hlist;

        struct list_head        link_watch_list;

        /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED=0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
               NETREG_UNREGISTERED,     /* completed unregister todo */
               NETREG_RELEASED,         /* called free_netdev */
               NETREG_DUMMY,            /* dummy device for NAPI poll */
        } reg_state:8;

        bool dismantle; /* device is going to be freed */
1237
1238        enum {
1239                RTNL_LINK_INITIALIZED,
1240                RTNL_LINK_INITIALIZING,
1241        } rtnl_link_state:16;
1242
1243        /* Called from unregister, can be used to call free_netdev */
1244        void (*destructor)(struct net_device *dev);
1245
1246#ifdef CONFIG_NETPOLL
1247        struct netpoll_info     *npinfo;
1248#endif
1249
1250#ifdef CONFIG_NET_NS
1251        /* Network namespace this network device is inside */
1252        struct net              *nd_net;
1253#endif
1254
1255        /* mid-layer private */
1256        union {
1257                void                            *ml_priv;
1258                struct pcpu_lstats __percpu     *lstats; /* loopback stats */
1259                struct pcpu_tstats __percpu     *tstats; /* tunnel stats */
1260                struct pcpu_dstats __percpu     *dstats; /* dummy stats */
1261        };
1262        /* GARP */
1263        struct garp_port __rcu  *garp_port;
1264
1265        /* class/net/name entry */
1266        struct device           dev;
1267        /* space for optional device, statistics, and wireless sysfs groups */
1268        const struct attribute_group *sysfs_groups[4];
1269
1270        /* rtnetlink link ops */
1271        const struct rtnl_link_ops *rtnl_link_ops;
1272
1273        /* for setting kernel sock attribute on TCP connection setup */
1274#define GSO_MAX_SIZE            65536
1275        unsigned int            gso_max_size;
1276#define GSO_MAX_SEGS            65535
1277        u16                     gso_max_segs;
1278
1279#ifdef CONFIG_DCB
1280        /* Data Center Bridging netlink ops */
1281        const struct dcbnl_rtnl_ops *dcbnl_ops;
1282#endif
1283        u8 num_tc;
1284        struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1285        u8 prio_tc_map[TC_BITMASK + 1];
1286
1287#if IS_ENABLED(CONFIG_FCOE)
1288        /* max exchange id for FCoE LRO by ddp */
1289        unsigned int            fcoe_ddp_xid;
1290#endif
1291#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1292        struct netprio_map __rcu *priomap;
1293#endif
1294        /* phy device may attach itself for hardware timestamping */
1295        struct phy_device *phydev;
1296
1297        struct lock_class_key *qdisc_tx_busylock;
1298
1299        /* group the device belongs to */
1300        int group;
1301
1302        struct pm_qos_request   pm_qos_req;
1303};
1304#define to_net_dev(d) container_of(d, struct net_device, dev)
1305
1306#define NETDEV_ALIGN            32
1307
1308static inline
1309int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1310{
1311        return dev->prio_tc_map[prio & TC_BITMASK];
1312}
1313
1314static inline
1315int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1316{
1317        if (tc >= dev->num_tc)
1318                return -EINVAL;
1319
1320        dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1321        return 0;
1322}
1323
1324static inline
1325void netdev_reset_tc(struct net_device *dev)
1326{
1327        dev->num_tc = 0;
1328        memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1329        memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1330}
1331
1332static inline
1333int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1334{
1335        if (tc >= dev->num_tc)
1336                return -EINVAL;
1337
1338        dev->tc_to_txq[tc].count = count;
1339        dev->tc_to_txq[tc].offset = offset;
1340        return 0;
1341}
1342
1343static inline
1344int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1345{
1346        if (num_tc > TC_MAX_QUEUE)
1347                return -EINVAL;
1348
1349        dev->num_tc = num_tc;
1350        return 0;
1351}
1352
1353static inline
1354int netdev_get_num_tc(struct net_device *dev)
1355{
1356        return dev->num_tc;
1357}
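
/*
 * Illustrative sketch (not part of this header): carving a hypothetical
 * 8-queue device into two traffic classes and steering priority 5 into
 * the second one, using the helpers above.  Error handling is omitted
 * for brevity; the function name is made up.
 */
static inline void example_setup_two_tcs(struct net_device *dev)
{
        netdev_set_num_tc(dev, 2);
        netdev_set_tc_queue(dev, 0, 4, 0);      /* TC0 -> queues 0-3 */
        netdev_set_tc_queue(dev, 1, 4, 4);      /* TC1 -> queues 4-7 */
        netdev_set_prio_tc_map(dev, 5, 1);      /* priority 5 -> TC1 */
}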
1358
1359static inline
1360struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1361                                         unsigned int index)
1362{
1363        return &dev->_tx[index];
1364}
1365
1366static inline void netdev_for_each_tx_queue(struct net_device *dev,
1367                                            void (*f)(struct net_device *,
1368                                                      struct netdev_queue *,
1369                                                      void *),
1370                                            void *arg)
1371{
1372        unsigned int i;
1373
1374        for (i = 0; i < dev->num_tx_queues; i++)
1375                f(dev, &dev->_tx[i], arg);
1376}
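
/*
 * Illustrative sketch (not part of this header): running a per-queue
 * callback over every TX queue, as a driver might at init time.  The
 * callback and wrapper names are hypothetical.
 */
static inline void example_init_txq(struct net_device *dev,
                                    struct netdev_queue *txq, void *arg)
{
        /* per-queue initialization would go here */
}

static inline void example_init_all_txqs(struct net_device *dev)
{
        netdev_for_each_tx_queue(dev, example_init_txq, NULL);
}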
1377
1378extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1379                                           struct sk_buff *skb);
1380
1381/*
1382 * Net namespace inlines
1383 */
1384static inline
1385struct net *dev_net(const struct net_device *dev)
1386{
1387        return read_pnet(&dev->nd_net);
1388}
1389
1390static inline
1391void dev_net_set(struct net_device *dev, struct net *net)
1392{
1393#ifdef CONFIG_NET_NS
1394        release_net(dev->nd_net);
1395        dev->nd_net = hold_net(net);
1396#endif
1397}
1398
1399static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1400{
1401#ifdef CONFIG_NET_DSA_TAG_DSA
1402        if (dev->dsa_ptr != NULL)
1403                return dsa_uses_dsa_tags(dev->dsa_ptr);
1404#endif
1405
1406        return false;
1407}
1408
1409static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1410{
1411#ifdef CONFIG_NET_DSA_TAG_TRAILER
1412        if (dev->dsa_ptr != NULL)
1413                return dsa_uses_trailer_tags(dev->dsa_ptr);
1414#endif
1415
1416        return false;
1417}
1418
1419/**
1420 *      netdev_priv - access network device private data
1421 *      @dev: network device
1422 *
1423 * Get network device private data
1424 */
1425static inline void *netdev_priv(const struct net_device *dev)
1426{
1427        return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1428}
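
/*
 * Illustrative sketch (not part of this header): reaching driver-private
 * data through netdev_priv().  "struct example_priv" is hypothetical; it
 * stands for the sizeof_priv area requested from alloc_netdev().
 */
struct example_priv {
        u32 msg_enable;
};

static inline void example_touch_priv(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);

        priv->msg_enable = 0;
}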
1429
1430/* Set the sysfs physical device reference for the network logical device.
1431 * If set prior to registration, a symlink is created during initialization.
1432 */
1433#define SET_NETDEV_DEV(net, pdev)       ((net)->dev.parent = (pdev))
1434
1435/* Set the sysfs device type for the network logical device to allow
1436 * fine-grained identification of different network device types. For
1437 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1438 */
1439#define SET_NETDEV_DEVTYPE(net, devtype)        ((net)->dev.type = (devtype))
1440
1441/**
1442 *      netif_napi_add - initialize a napi context
1443 *      @dev:  network device
1444 *      @napi: napi context
1445 *      @poll: polling function
1446 *      @weight: default weight
1447 *
1448 * netif_napi_add() must be used to initialize a napi context prior to calling
1449 * *any* of the other napi related functions.
1450 */
1451void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1452                    int (*poll)(struct napi_struct *, int), int weight);
1453
1454/**
1455 *  netif_napi_del - remove a napi context
1456 *  @napi: napi context
1457 *
1458 *  netif_napi_del() removes a napi context from the network device napi list
1459 */
1460void netif_napi_del(struct napi_struct *napi);
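
/*
 * Illustrative sketch (not part of this header): registering a NAPI
 * poll handler.  The poll function and the weight of 64 are
 * hypothetical; a real driver would embed the napi_struct in its
 * private data and complete the context only when it polled less than
 * the full budget.
 */
static inline int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... process up to "budget" received packets here ... */
        if (work_done < budget)
                napi_complete(napi);
        return work_done;
}

static inline void example_setup_napi(struct net_device *dev,
                                      struct napi_struct *napi)
{
        netif_napi_add(dev, napi, example_poll, 64);
}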
1461
1462struct napi_gro_cb {
1463        /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1464        void *frag0;
1465
1466        /* Length of frag0. */
1467        unsigned int frag0_len;
1468
1469        /* This indicates where we are processing relative to skb->data. */
1470        int data_offset;
1471
1472        /* This is non-zero if the packet cannot be merged with the new skb. */
1473        int flush;
1474
1475        /* Number of segments aggregated. */
1476        u16     count;
1477
1478        /* This is non-zero if the packet may be of the same flow. */
1479        u8      same_flow;
1480
1481        /* Free the skb? */
1482        u8      free;
1483#define NAPI_GRO_FREE             1
1484#define NAPI_GRO_FREE_STOLEN_HEAD 2
1485
1486        /* jiffies when first packet was created/queued */
1487        unsigned long age;
1488
1489        /* Used in ipv6_gro_receive() */
1490        int     proto;
1491
1492        /* used in skb_gro_receive() slow path */
1493        struct sk_buff *last;
1494};
1495
1496#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1497
1498struct packet_type {
1499        __be16                  type;   /* This is really htons(ether_type). */
1500        struct net_device       *dev;   /* NULL is wildcarded here           */
1501        int                     (*func) (struct sk_buff *,
1502                                         struct net_device *,
1503                                         struct packet_type *,
1504                                         struct net_device *);
1505        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
1506                                                netdev_features_t features);
1507        int                     (*gso_send_check)(struct sk_buff *skb);
1508        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
1509                                               struct sk_buff *skb);
1510        int                     (*gro_complete)(struct sk_buff *skb);
1511        bool                    (*id_match)(struct packet_type *ptype,
1512                                            struct sock *sk);
1513        void                    *af_packet_priv;
1514        struct list_head        list;
1515};
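
/*
 * Illustrative sketch (not part of this header): a packet tap for all
 * ether types, installed with dev_add_pack() (declared further down)
 * and removed with dev_remove_pack().  The handler body is
 * hypothetical; a handler must consume or free the skb it is given.
 * ETH_P_ALL comes from <linux/if_ether.h>.
 */
static inline int example_pt_rcv(struct sk_buff *skb, struct net_device *dev,
                                 struct packet_type *pt,
                                 struct net_device *orig_dev)
{
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt = {
        .type = cpu_to_be16(ETH_P_ALL),
        .func = example_pt_rcv,
};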
1516
1517#include <linux/notifier.h>
1518
1519/* netdevice notifier chain. Please remember to update the rtnetlink
1520 * notification exclusion list in rtnetlink_event() when adding new
1521 * types.
1522 */
1523#define NETDEV_UP       0x0001  /* For now you can't veto a device up/down */
1524#define NETDEV_DOWN     0x0002
1525#define NETDEV_REBOOT   0x0003  /* Tell a protocol stack a network interface
1526                                   detected a hardware crash and restarted
1527                                   - we can use this eg to kick tcp sessions
1528                                   once done */
1529#define NETDEV_CHANGE   0x0004  /* Notify device state change */
1530#define NETDEV_REGISTER 0x0005
1531#define NETDEV_UNREGISTER       0x0006
1532#define NETDEV_CHANGEMTU        0x0007
1533#define NETDEV_CHANGEADDR       0x0008
1534#define NETDEV_GOING_DOWN       0x0009
1535#define NETDEV_CHANGENAME       0x000A
1536#define NETDEV_FEAT_CHANGE      0x000B
1537#define NETDEV_BONDING_FAILOVER 0x000C
1538#define NETDEV_PRE_UP           0x000D
1539#define NETDEV_PRE_TYPE_CHANGE  0x000E
1540#define NETDEV_POST_TYPE_CHANGE 0x000F
1541#define NETDEV_POST_INIT        0x0010
1542#define NETDEV_UNREGISTER_FINAL 0x0011
1543#define NETDEV_RELEASE          0x0012
1544#define NETDEV_NOTIFY_PEERS     0x0013
1545#define NETDEV_JOIN             0x0014
1546
1547extern int register_netdevice_notifier(struct notifier_block *nb);
1548extern int unregister_netdevice_notifier(struct notifier_block *nb);
1549extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
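
/*
 * Illustrative sketch (not part of this header): a netdevice notifier
 * reacting to up/down events.  In this kernel the notifier's ptr
 * argument is the net_device itself.  The handler name is hypothetical;
 * it would be hooked up with register_netdevice_notifier().
 */
static inline int example_netdev_event(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        switch (event) {
        case NETDEV_UP:
                pr_info("%s: up\n", dev->name);
                break;
        case NETDEV_DOWN:
                pr_info("%s: down\n", dev->name);
                break;
        }
        return NOTIFY_DONE;
}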
1550
1551
1552extern rwlock_t                         dev_base_lock;          /* Device list lock */
1553
1554
1555#define for_each_netdev(net, d)         \
1556                list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1557#define for_each_netdev_reverse(net, d) \
1558                list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1559#define for_each_netdev_rcu(net, d)             \
1560                list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1561#define for_each_netdev_safe(net, d, n) \
1562                list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1563#define for_each_netdev_continue(net, d)                \
1564                list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1565#define for_each_netdev_continue_rcu(net, d)            \
1566        list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1567#define net_device_entry(lh)    list_entry(lh, struct net_device, dev_list)
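
/*
 * Illustrative sketch (not part of this header): walking every device
 * in a namespace under RCU with the iterators above.
 */
static inline void example_list_devices(struct net *net)
{
        struct net_device *dev;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                pr_info("%s\n", dev->name);
        rcu_read_unlock();
}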
1568
1569static inline struct net_device *next_net_device(struct net_device *dev)
1570{
1571        struct list_head *lh;
1572        struct net *net;
1573
1574        net = dev_net(dev);
1575        lh = dev->dev_list.next;
1576        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1577}
1578
1579static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1580{
1581        struct list_head *lh;
1582        struct net *net;
1583
1584        net = dev_net(dev);
1585        lh = rcu_dereference(list_next_rcu(&dev->dev_list));
1586        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1587}
1588
1589static inline struct net_device *first_net_device(struct net *net)
1590{
1591        return list_empty(&net->dev_base_head) ? NULL :
1592                net_device_entry(net->dev_base_head.next);
1593}
1594
1595static inline struct net_device *first_net_device_rcu(struct net *net)
1596{
1597        struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1598
1599        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1600}
1601
1602extern int                      netdev_boot_setup_check(struct net_device *dev);
1603extern unsigned long            netdev_boot_base(const char *prefix, int unit);
1604extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1605                                              const char *hwaddr);
1606extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1607extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1608extern void             dev_add_pack(struct packet_type *pt);
1609extern void             dev_remove_pack(struct packet_type *pt);
1610extern void             __dev_remove_pack(struct packet_type *pt);
1611
1612extern struct net_device        *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1613                                                      unsigned short mask);
1614extern struct net_device        *dev_get_by_name(struct net *net, const char *name);
1615extern struct net_device        *dev_get_by_name_rcu(struct net *net, const char *name);
1616extern struct net_device        *__dev_get_by_name(struct net *net, const char *name);
1617extern int              dev_alloc_name(struct net_device *dev, const char *name);
1618extern int              dev_open(struct net_device *dev);
1619extern int              dev_close(struct net_device *dev);
1620extern void             dev_disable_lro(struct net_device *dev);
1621extern int              dev_loopback_xmit(struct sk_buff *newskb);
1622extern int              dev_queue_xmit(struct sk_buff *skb);
1623extern int              register_netdevice(struct net_device *dev);
1624extern void             unregister_netdevice_queue(struct net_device *dev,
1625                                                   struct list_head *head);
1626extern void             unregister_netdevice_many(struct list_head *head);
1627static inline void unregister_netdevice(struct net_device *dev)
1628{
1629        unregister_netdevice_queue(dev, NULL);
1630}
1631
1632extern int              netdev_refcnt_read(const struct net_device *dev);
1633extern void             free_netdev(struct net_device *dev);
1634extern void             synchronize_net(void);
1635extern int              init_dummy_netdev(struct net_device *dev);
1636extern void             netdev_resync_ops(struct net_device *dev);
1637
1638extern struct net_device        *dev_get_by_index(struct net *net, int ifindex);
1639extern struct net_device        *__dev_get_by_index(struct net *net, int ifindex);
1640extern struct net_device        *dev_get_by_index_rcu(struct net *net, int ifindex);
1641extern int              dev_restart(struct net_device *dev);
1642#ifdef CONFIG_NETPOLL_TRAP
1643extern int              netpoll_trap(void);
1644#endif
1645extern int             skb_gro_receive(struct sk_buff **head,
1646                                       struct sk_buff *skb);
1647
1648static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1649{
1650        return NAPI_GRO_CB(skb)->data_offset;
1651}
1652
1653static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1654{
1655        return skb->len - NAPI_GRO_CB(skb)->data_offset;
1656}
1657
1658static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1659{
1660        NAPI_GRO_CB(skb)->data_offset += len;
1661}
1662
1663static inline void *skb_gro_header_fast(struct sk_buff *skb,
1664                                        unsigned int offset)
1665{
1666        return NAPI_GRO_CB(skb)->frag0 + offset;
1667}
1668
1669static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1670{
1671        return NAPI_GRO_CB(skb)->frag0_len < hlen;
1672}
1673
1674static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1675                                        unsigned int offset)
1676{
1677        if (!pskb_may_pull(skb, hlen))
1678                return NULL;
1679
1680        NAPI_GRO_CB(skb)->frag0 = NULL;
1681        NAPI_GRO_CB(skb)->frag0_len = 0;
1682        return skb->data + offset;
1683}
1684
1685static inline void *skb_gro_mac_header(struct sk_buff *skb)
1686{
1687        return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
1688}
1689
1690static inline void *skb_gro_network_header(struct sk_buff *skb)
1691{
1692        return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1693               skb_network_offset(skb);
1694}
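
/*
 * Illustrative sketch (not part of this header): the usual
 * header-access pattern in a gro_receive handler, mirroring what the
 * inet/ipv6 GRO paths do: try the frag0 fast path, fall back to the
 * slow path when the header does not fit.  "struct example_hdr" is
 * hypothetical.
 */
struct example_hdr {
        __be16 id;
};

static inline struct example_hdr *example_gro_header(struct sk_buff *skb)
{
        struct example_hdr *eh;
        unsigned int off = skb_gro_offset(skb);
        unsigned int hlen = off + sizeof(*eh);

        eh = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen))
                eh = skb_gro_header_slow(skb, hlen, off);
        return eh;      /* NULL if the slow-path pull failed */
}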
1695
1696static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1697                                  unsigned short type,
1698                                  const void *daddr, const void *saddr,
1699                                  unsigned int len)
1700{
1701        if (!dev->header_ops || !dev->header_ops->create)
1702                return 0;
1703
1704        return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
1705}
1706
1707static inline int dev_parse_header(const struct sk_buff *skb,
1708                                   unsigned char *haddr)
1709{
1710        const struct net_device *dev = skb->dev;
1711
1712        if (!dev->header_ops || !dev->header_ops->parse)
1713                return 0;
1714        return dev->header_ops->parse(skb, haddr);
1715}
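
/*
 * Illustrative sketch (not part of this header): filling in the
 * link-layer header before transmission, much as the neighbour layer
 * does.  ETH_P_IP comes from <linux/if_ether.h>; the function name is
 * made up.
 */
static inline int example_fill_header(struct sk_buff *skb,
                                      struct net_device *dev,
                                      const unsigned char *dest)
{
        return dev_hard_header(skb, dev, ETH_P_IP, dest,
                               dev->dev_addr, skb->len);
}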
1716
1717typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
1718extern int              register_gifconf(unsigned int family, gifconf_func_t * gifconf);
1719static inline int unregister_gifconf(unsigned int family)
1720{
1721        return register_gifconf(family, NULL);
1722}
1723
1724/*
1725 * Incoming packets are placed on per-cpu queues
1726 */
1727struct softnet_data {
1728        struct Qdisc            *output_queue;
1729        struct Qdisc            **output_queue_tailp;
1730        struct list_head        poll_list;
1731        struct sk_buff          *completion_queue;
1732        struct sk_buff_head     process_queue;
1733
1734        /* stats */
1735        unsigned int            processed;
1736        unsigned int            time_squeeze;
1737        unsigned int            cpu_collision;
1738        unsigned int            received_rps;
1739
1740#ifdef CONFIG_RPS
1741        struct softnet_data     *rps_ipi_list;
1742
1743        /* Elements below can be accessed between CPUs for RPS */
1744        struct call_single_data csd ____cacheline_aligned_in_smp;
1745        struct softnet_data     *rps_ipi_next;
1746        unsigned int            cpu;
1747        unsigned int            input_queue_head;
1748        unsigned int            input_queue_tail;
1749#endif
1750        unsigned int            dropped;
1751        struct sk_buff_head     input_pkt_queue;
1752        struct napi_struct      backlog;
1753};
1754
1755static inline void input_queue_head_incr(struct softnet_data *sd)
1756{
1757#ifdef CONFIG_RPS
1758        sd->input_queue_head++;
1759#endif
1760}
1761
1762static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1763                                              unsigned int *qtail)
1764{
1765#ifdef CONFIG_RPS
1766        *qtail = ++sd->input_queue_tail;
1767#endif
1768}
1769
1770DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1771
1772extern void __netif_schedule(struct Qdisc *q);
1773
1774static inline void netif_schedule_queue(struct netdev_queue *txq)
1775{
1776        if (!(txq->state & QUEUE_STATE_ANY_XOFF))
1777                __netif_schedule(txq->qdisc);
1778}
1779
1780static inline void netif_tx_schedule_all(struct net_device *dev)
1781{
1782        unsigned int i;
1783
1784        for (i = 0; i < dev->num_tx_queues; i++)
1785                netif_schedule_queue(netdev_get_tx_queue(dev, i));
1786}
1787
1788static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1789{
1790        clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1791}
1792
1793/**
1794 *      netif_start_queue - allow transmit
1795 *      @dev: network device
1796 *
1797 *      Allow upper layers to call the device hard_start_xmit routine.
1798 */
1799static inline void netif_start_queue(struct net_device *dev)
1800{
1801        netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1802}
1803
1804static inline void netif_tx_start_all_queues(struct net_device *dev)
1805{
1806        unsigned int i;
1807
1808        for (i = 0; i < dev->num_tx_queues; i++) {
1809                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1810                netif_tx_start_queue(txq);
1811        }
1812}
1813
1814static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
1815{
1816#ifdef CONFIG_NETPOLL_TRAP
1817        if (netpoll_trap()) {
1818                netif_tx_start_queue(dev_queue);
1819                return;
1820        }
1821#endif
1822        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
1823                __netif_schedule(dev_queue->qdisc);
1824}
1825
1826/**
1827 *      netif_wake_queue - restart transmit
1828 *      @dev: network device
1829 *
1830 *      Allow upper layers to call the device hard_start_xmit routine.
1831 *      Used for flow control when transmit resources are available.
1832 */
1833static inline void netif_wake_queue(struct net_device *dev)
1834{
1835        netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
1836}
1837
1838static inline void netif_tx_wake_all_queues(struct net_device *dev)
1839{
1840        unsigned int i;
1841
1842        for (i = 0; i < dev->num_tx_queues; i++) {
1843                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1844                netif_tx_wake_queue(txq);
1845        }
1846}
1847
1848static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1849{
1850        if (WARN_ON(!dev_queue)) {
1851                pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
1852                return;
1853        }
1854        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1855}
1856
1857/**
1858 *      netif_stop_queue - stop transmit
1859 *      @dev: network device
1860 *
1861 *      Stop upper layers calling the device hard_start_xmit routine.
1862 *      Used for flow control when transmit resources are unavailable.
1863 */
1864static inline void netif_stop_queue(struct net_device *dev)
1865{
1866        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
1867}
1868
1869static inline void netif_tx_stop_all_queues(struct net_device *dev)
1870{
1871        unsigned int i;
1872
1873        for (i = 0; i < dev->num_tx_queues; i++) {
1874                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1875                netif_tx_stop_queue(txq);
1876        }
1877}
1878
1879static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
1880{
1881        return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1882}
1883
1884/**
1885 *      netif_queue_stopped - test if transmit queue is flow blocked
1886 *      @dev: network device
1887 *
1888 *      Test if transmit queue on device is currently unable to send.
1889 */
1890static inline bool netif_queue_stopped(const struct net_device *dev)
1891{
1892        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
1893}
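
/*
 * Illustrative sketch (not part of this header): the classic
 * single-queue flow-control pattern.  ndo_start_xmit() stops the queue
 * when the (hypothetical) hardware ring is full; the TX-completion
 * handler wakes it once descriptors have been reclaimed.
 */
static inline netdev_tx_t example_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
{
        bool ring_full = false; /* stands in for real ring state */

        /* ... post skb to the hardware ring ... */
        if (ring_full)
                netif_stop_queue(dev);
        return NETDEV_TX_OK;
}

static inline void example_tx_done(struct net_device *dev)
{
        /* ... reclaim completed descriptors ... */
        if (netif_queue_stopped(dev))
                netif_wake_queue(dev);
}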
1894
1895static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
1896{
1897        return dev_queue->state & QUEUE_STATE_ANY_XOFF;
1898}
1899
1900static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
1901{
1902        return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
1903}
1904
1905static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
1906                                        unsigned int bytes)
1907{
1908#ifdef CONFIG_BQL
1909        dql_queued(&dev_queue->dql, bytes);
1910
1911        if (likely(dql_avail(&dev_queue->dql) >= 0))
1912                return;
1913
1914        set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
1915
1916        /*
1917         * The XOFF flag must be set before checking the dql_avail below,
1918         * because in netdev_tx_completed_queue we update the dql_completed
1919         * before checking the XOFF flag.
1920         */
1921        smp_mb();
1922
1923        /* check again in case another CPU has just made room avail */
1924        if (unlikely(dql_avail(&dev_queue->dql) >= 0))
1925                clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
1926#endif
1927}
1928
1929static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
1930{
1931        netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
1932}
1933
1934static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
1935                                             unsigned int pkts, unsigned int bytes)
1936{
1937#ifdef CONFIG_BQL
1938        if (unlikely(!bytes))
1939                return;
1940
1941        dql_completed(&dev_queue->dql, bytes);
1942
1943        /*
1944         * Without the memory barrier there is a small possibility that
1945         * netdev_tx_sent_queue will miss the update and cause the queue to
1946         * be stopped forever
1947         */
1948        smp_mb();
1949
1950        if (dql_avail(&dev_queue->dql) < 0)
1951                return;
1952
1953        if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
1954                netif_schedule_queue(dev_queue);
1955#endif
1956}
1957
1958static inline void netdev_completed_queue(struct net_device *dev,
1959                                          unsigned int pkts, unsigned int bytes)
1960{
1961        netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
1962}
1963
1964static inline void netdev_tx_reset_queue(struct netdev_queue *q)
1965{
1966#ifdef CONFIG_BQL
1967        clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
1968        dql_reset(&q->dql);
1969#endif
1970}
1971
1972static inline void netdev_reset_queue(struct net_device *dev)
1973{
1974        netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
1975}
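
/*
 * Illustrative sketch (not part of this header): byte queue limit
 * accounting in a driver.  Bytes are charged at queue time in
 * ndo_start_xmit() and credited back from the TX-completion path; BQL
 * then throttles the stack via __QUEUE_STATE_STACK_XOFF as implemented
 * above.  Function names are hypothetical.
 */
static inline netdev_tx_t example_bql_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
{
        unsigned int len = skb->len;    /* sample before skb may be freed */

        /* ... post skb to hardware ... */
        netdev_sent_queue(dev, len);
        return NETDEV_TX_OK;
}

static inline void example_bql_tx_done(struct net_device *dev,
                                       unsigned int pkts, unsigned int bytes)
{
        /* ... reclaim descriptors for "pkts" packets / "bytes" bytes ... */
        netdev_completed_queue(dev, pkts, bytes);
}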
1976
1977/**
1978 *      netif_running - test if up
1979 *      @dev: network device
1980 *
1981 *      Test if the device has been brought up.
1982 */
1983static inline bool netif_running(const struct net_device *dev)
1984{
1985        return test_bit(__LINK_STATE_START, &dev->state);
1986}
1987
1988/*
1989 * Routines to manage the subqueues on a device.  We only need start,
1990 * stop, and a check if it's stopped.  All other device management is
1991 * done at the overall netdevice level.
1992 * There is also a test for whether the device is multiqueue.
1993 */
1994
1995/**
1996 *      netif_start_subqueue - allow sending packets on subqueue
1997 *      @dev: network device
1998 *      @queue_index: sub queue index
1999 *
2000 * Start individual transmit queue of a device with multiple transmit queues.
2001 */
2002static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2003{
2004        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2005
2006        netif_tx_start_queue(txq);
2007}
2008
2009/**
2010 *      netif_stop_subqueue - stop sending packets on subqueue
2011 *      @dev: network device
2012 *      @queue_index: sub queue index
2013 *
2014 * Stop individual transmit queue of a device with multiple transmit queues.
2015 */
2016static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2017{
2018        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2019#ifdef CONFIG_NETPOLL_TRAP
2020        if (netpoll_trap())
2021                return;
2022#endif
2023        netif_tx_stop_queue(txq);
2024}
2025
2026/**
2027 *      netif_subqueue_stopped - test status of subqueue
2028 *      @dev: network device
2029 *      @queue_index: sub queue index
2030 *
2031 * Check individual transmit queue of a device with multiple transmit queues.
2032 */
2033static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2034                                            u16 queue_index)
2035{
2036        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2037
2038        return netif_tx_queue_stopped(txq);
2039}
2040
2041static inline bool netif_subqueue_stopped(const struct net_device *dev,
2042                                          struct sk_buff *skb)
2043{
2044        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2045}
2046
2047/**
2048 *      netif_wake_subqueue - allow sending packets on subqueue
2049 *      @dev: network device
2050 *      @queue_index: sub queue index
2051 *
2052 * Resume individual transmit queue of a device with multiple transmit queues.
2053 */
2054static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2055{
2056        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2057#ifdef CONFIG_NETPOLL_TRAP
2058        if (netpoll_trap())
2059                return;
2060#endif
2061        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
2062                __netif_schedule(txq->qdisc);
2063}
2064
2065/*
2066 * Returns a Tx hash for the given packet, using dev->real_num_tx_queues
2067 * as the upper limit of the distribution range for the returned value.
2068 */
2069static inline u16 skb_tx_hash(const struct net_device *dev,
2070                              const struct sk_buff *skb)
2071{
2072        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2073}
2074
2075/**
2076 *      netif_is_multiqueue - test if device has multiple transmit queues
2077 *      @dev: network device
2078 *
2079 * Check if device has multiple transmit queues
2080 */
2081static inline bool netif_is_multiqueue(const struct net_device *dev)
2082{
2083        return dev->num_tx_queues > 1;
2084}
2085
2086extern int netif_set_real_num_tx_queues(struct net_device *dev,
2087                                        unsigned int txq);
2088
2089#ifdef CONFIG_RPS
2090extern int netif_set_real_num_rx_queues(struct net_device *dev,
2091                                        unsigned int rxq);
2092#else
2093static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2094                                                unsigned int rxq)
2095{
2096        return 0;
2097}
2098#endif
2099
2100static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2101                                             const struct net_device *from_dev)
2102{
2103        int err;
2104
2105        err = netif_set_real_num_tx_queues(to_dev,
2106                                           from_dev->real_num_tx_queues);
2107        if (err)
2108                return err;
2109#ifdef CONFIG_RPS
2110        return netif_set_real_num_rx_queues(to_dev,
2111                                            from_dev->real_num_rx_queues);
2112#else
2113        return 0;
2114#endif
2115}
2116
2117#define DEFAULT_MAX_NUM_RSS_QUEUES      (8)
2118extern int netif_get_num_default_rss_queues(void);
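
/*
 * Illustrative sketch (not part of this header): sizing the active TX
 * queue count against the RSS default, capped by a hypothetical
 * hardware limit.
 */
static inline int example_size_queues(struct net_device *dev,
                                      unsigned int hw_max)
{
        unsigned int n = min_t(unsigned int, hw_max,
                               netif_get_num_default_rss_queues());

        return netif_set_real_num_tx_queues(dev, n);
}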
2119
2120/* Use this variant when it is known for sure that the caller
2121 * is executing from hardware interrupt context or with hardware interrupts
2122 * disabled.
2123 */
2124extern void dev_kfree_skb_irq(struct sk_buff *skb);
2125
2126/* Use this variant in places where it could be invoked
2127 * from either hardware interrupt or other context, with hardware interrupts
2128 * either disabled or enabled.
2129 */
2130extern void dev_kfree_skb_any(struct sk_buff *skb);
2131
2132extern int              netif_rx(struct sk_buff *skb);
2133extern int              netif_rx_ni(struct sk_buff *skb);
2134extern int              netif_receive_skb(struct sk_buff *skb);
2135extern gro_result_t     dev_gro_receive(struct napi_struct *napi,
2136                                        struct sk_buff *skb);
2137extern gro_result_t     napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
2138extern gro_result_t     napi_gro_receive(struct napi_struct *napi,
2139                                         struct sk_buff *skb);
2140extern void             napi_gro_flush(struct napi_struct *napi, bool flush_old);
2141extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
2142extern gro_result_t     napi_frags_finish(struct napi_struct *napi,
2143                                          struct sk_buff *skb,
2144                                          gro_result_t ret);
2145extern gro_result_t     napi_gro_frags(struct napi_struct *napi);
2146
2147static inline void napi_free_frags(struct napi_struct *napi)
2148{
2149        kfree_skb(napi->skb);
2150        napi->skb = NULL;
2151}
2152
2153extern int netdev_rx_handler_register(struct net_device *dev,
2154                                      rx_handler_func_t *rx_handler,
2155                                      void *rx_handler_data);
2156extern void netdev_rx_handler_unregister(struct net_device *dev);
2157
2158extern bool             dev_valid_name(const char *name);
2159extern int              dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2160extern int              dev_ethtool(struct net *net, struct ifreq *);
2161extern unsigned int     dev_get_flags(const struct net_device *);
2162extern int              __dev_change_flags(struct net_device *, unsigned int flags);
2163extern int              dev_change_flags(struct net_device *, unsigned int);
2164extern void             __dev_notify_flags(struct net_device *, unsigned int old_flags);
2165extern int              dev_change_name(struct net_device *, const char *);
2166extern int              dev_set_alias(struct net_device *, const char *, size_t);
2167extern int              dev_change_net_namespace(struct net_device *,
2168                                                 struct net *, const char *);
2169extern int              dev_set_mtu(struct net_device *, int);
2170extern void             dev_set_group(struct net_device *, int);
2171extern int              dev_set_mac_address(struct net_device *,
2172                                            struct sockaddr *);
2173extern int              dev_hard_start_xmit(struct sk_buff *skb,
2174                                            struct net_device *dev,
2175                                            struct netdev_queue *txq);
2176extern int              dev_forward_skb(struct net_device *dev,
2177                                        struct sk_buff *skb);
2178
2179extern int              netdev_budget;
2180
2181/* Called by rtnetlink.c:rtnl_unlock() */
2182extern void netdev_run_todo(void);
2183
2184/**
2185 *      dev_put - release reference to device
2186 *      @dev: network device
2187 *
2188 * Release reference to device to allow it to be freed.
2189 */
2190static inline void dev_put(struct net_device *dev)
2191{
2192        this_cpu_dec(*dev->pcpu_refcnt);
2193}
2194
2195/**
2196 *      dev_hold - get reference to device
2197 *      @dev: network device
2198 *
2199 * Hold reference to device to keep it from being freed.
2200 */
2201static inline void dev_hold(struct net_device *dev)
2202{
2203        this_cpu_inc(*dev->pcpu_refcnt);
2204}
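
/*
 * Illustrative sketch (not part of this header): pinning a device
 * across a sleeping operation so it cannot be freed underneath us.
 * Both function names are made up.
 */
static inline void example_slow_work(struct net_device *dev)
{
        /* ... something that may sleep ... */
}

static inline void example_use_device(struct net_device *dev)
{
        dev_hold(dev);                  /* take a reference */
        example_slow_work(dev);         /* dev stays valid here */
        dev_put(dev);                   /* drop it again */
}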
2205
2206/* Carrier loss detection, dial on demand. The functions netif_carrier_on
2207 * and _off may be called from IRQ context, but it is the caller
2208 * who is responsible for serializing these calls.
2209 *
2210 * The name carrier is inappropriate, these functions should really be
2211 * called netif_lowerlayer_*() because they represent the state of any
2212 * kind of lower layer not just hardware media.
2213 */
2214
2215extern void linkwatch_init_dev(struct net_device *dev);
2216extern void linkwatch_fire_event(struct net_device *dev);
2217extern void linkwatch_forget_dev(struct net_device *dev);
2218
2219/**
2220 *      netif_carrier_ok - test if carrier present
2221 *      @dev: network device
2222 *
2223 * Check if carrier is present on device
2224 */
2225static inline bool netif_carrier_ok(const struct net_device *dev)
2226{
2227        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2228}
2229
2230extern unsigned long dev_trans_start(struct net_device *dev);
2231
2232extern void __netdev_watchdog_up(struct net_device *dev);
2233
2234extern void netif_carrier_on(struct net_device *dev);
2235
2236extern void netif_carrier_off(struct net_device *dev);
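
/*
 * Illustrative sketch (not part of this header): propagating link
 * state from a driver's (hypothetical) PHY interrupt or polling
 * routine.
 */
static inline void example_link_change(struct net_device *dev, bool link_up)
{
        if (link_up)
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
}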
2237
2238/**
2239 *      netif_dormant_on - mark device as dormant.
2240 *      @dev: network device
2241 *
2242 * Mark device as dormant (as per RFC2863).
2243 *
2244 * The dormant state indicates that the relevant interface is not
2245 * actually in a condition to pass packets (i.e., it is not 'up') but is
2246 * in a "pending" state, waiting for some external event.  For "on-
2247 * demand" interfaces, this new state identifies the situation where the
2248 * interface is waiting for events to place it in the up state.
2249 *
2250 */
2251static inline void netif_dormant_on(struct net_device *dev)
2252{
2253        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2254                linkwatch_fire_event(dev);
2255}
2256
2257/**
2258 *      netif_dormant_off - set device as not dormant.
2259 *      @dev: network device
2260 *
2261 * Device is not in dormant state.
2262 */
2263static inline void netif_dormant_off(struct net_device *dev)
2264{
2265        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2266                linkwatch_fire_event(dev);
2267}
2268
2269/**
2270 *      netif_dormant - test if device is dormant
2271 *      @dev: network device
2272 *
2273 * Check if the device is in the dormant state
2274 */
2275static inline bool netif_dormant(const struct net_device *dev)
2276{
2277        return test_bit(__LINK_STATE_DORMANT, &dev->state);
2278}
2279
2280
2281/**
2282 *      netif_oper_up - test if device is operational
2283 *      @dev: network device
2284 *
2285 * Check if the device's operational state (RFC2863) is up
2286 */
2287static inline bool netif_oper_up(const struct net_device *dev)
2288{
2289        return (dev->operstate == IF_OPER_UP ||
2290                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2291}
2292
2293/**
2294 *      netif_device_present - is device available or removed
2295 *      @dev: network device
2296 *
2297 * Check if device has not been removed from system.
2298 */
2299static inline bool netif_device_present(struct net_device *dev)
2300{
2301        return test_bit(__LINK_STATE_PRESENT, &dev->state);
2302}
2303
2304extern void netif_device_detach(struct net_device *dev);
2305
2306extern void netif_device_attach(struct net_device *dev);
2307
2308/*
2309 * Network interface message level settings
2310 */
2311
2312enum {
2313        NETIF_MSG_DRV           = 0x0001,
2314        NETIF_MSG_PROBE         = 0x0002,
2315        NETIF_MSG_LINK          = 0x0004,
2316        NETIF_MSG_TIMER         = 0x0008,
2317        NETIF_MSG_IFDOWN        = 0x0010,
2318        NETIF_MSG_IFUP          = 0x0020,
2319        NETIF_MSG_RX_ERR        = 0x0040,
2320        NETIF_MSG_TX_ERR        = 0x0080,
2321        NETIF_MSG_TX_QUEUED     = 0x0100,
2322        NETIF_MSG_INTR          = 0x0200,
2323        NETIF_MSG_TX_DONE       = 0x0400,
2324        NETIF_MSG_RX_STATUS     = 0x0800,
2325        NETIF_MSG_PKTDATA       = 0x1000,
2326        NETIF_MSG_HW            = 0x2000,
2327        NETIF_MSG_WOL           = 0x4000,
2328};
2329
2330#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
2331#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
2332#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
2333#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
2334#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
2335#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
2336#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
2337#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
2338#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2339#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
2340#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
2341#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2342#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
2343#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
2344#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)
2345
2346static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2347{
2348        /* use default */
2349        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2350                return default_msg_enable_bits;
2351        if (debug_value == 0)   /* no output */
2352                return 0;
2353        /* set low N bits */
2354        return (1 << debug_value) - 1;
2355}
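
/*
 * Illustrative sketch (not part of this header): seeding msg_enable
 * from a module parameter, the way most drivers consume
 * netif_msg_init().  "debug" and "struct example_msg_priv" are
 * hypothetical.
 */
struct example_msg_priv {
        u32 msg_enable;
};

static inline void example_init_msglevel(struct example_msg_priv *priv,
                                         int debug)
{
        priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
                                          NETIF_MSG_PROBE | NETIF_MSG_LINK);
}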
2356
2357static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
2358{
2359        spin_lock(&txq->_xmit_lock);
2360        txq->xmit_lock_owner = cpu;
2361}
2362
2363static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2364{
2365        spin_lock_bh(&txq->_xmit_lock);
2366        txq->xmit_lock_owner = smp_processor_id();
2367}
2368
2369static inline bool __netif_tx_trylock(struct netdev_queue *txq)
2370{
2371        bool ok = spin_trylock(&txq->_xmit_lock);
2372        if (likely(ok))
2373                txq->xmit_lock_owner = smp_processor_id();
2374        return ok;
2375}
2376
2377static inline void __netif_tx_unlock(struct netdev_queue *txq)
2378{
2379        txq->xmit_lock_owner = -1;
2380        spin_unlock(&txq->_xmit_lock);
2381}
2382
2383static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2384{
2385        txq->xmit_lock_owner = -1;
2386        spin_unlock_bh(&txq->_xmit_lock);
2387}
2388
2389static inline void txq_trans_update(struct netdev_queue *txq)
2390{
2391        if (txq->xmit_lock_owner != -1)
2392                txq->trans_start = jiffies;
2393}
2394
2395/**
2396 *      netif_tx_lock - grab network device transmit lock
2397 *      @dev: network device
2398 *
2399 * Get network device transmit lock
2400 */
2401static inline void netif_tx_lock(struct net_device *dev)
2402{
2403        unsigned int i;
2404        int cpu;
2405
2406        spin_lock(&dev->tx_global_lock);
2407        cpu = smp_processor_id();
2408        for (i = 0; i < dev->num_tx_queues; i++) {
2409                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2410
2411                /* We are the only thread of execution doing a
2412                 * freeze, but we have to grab the _xmit_lock in
2413                 * order to synchronize with threads which are in
2414                 * the ->hard_start_xmit() handler and already
2415                 * checked the frozen bit.
2416                 */
2417                __netif_tx_lock(txq, cpu);
2418                set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2419                __netif_tx_unlock(txq);
2420        }
2421}
2422
2423static inline void netif_tx_lock_bh(struct net_device *dev)
2424{
2425        local_bh_disable();
2426        netif_tx_lock(dev);
2427}
2428
2429static inline void netif_tx_unlock(struct net_device *dev)
2430{
2431        unsigned int i;
2432
2433        for (i = 0; i < dev->num_tx_queues; i++) {
2434                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2435
2436                /* No need to grab the _xmit_lock here.  If the
2437                 * queue is not stopped for another reason, we
2438                 * force a schedule.
2439                 */
2440                clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
2441                netif_schedule_queue(txq);
2442        }
2443        spin_unlock(&dev->tx_global_lock);
2444}
2445
2446static inline void netif_tx_unlock_bh(struct net_device *dev)
2447{
2448        netif_tx_unlock(dev);
2449        local_bh_enable();
2450}
2451
2452#define HARD_TX_LOCK(dev, txq, cpu) {                   \
2453        if ((dev->features & NETIF_F_LLTX) == 0) {      \
2454                __netif_tx_lock(txq, cpu);              \
2455        }                                               \
2456}
2457
2458#define HARD_TX_UNLOCK(dev, txq) {                      \
2459        if ((dev->features & NETIF_F_LLTX) == 0) {      \
2460                __netif_tx_unlock(txq);                 \
2461        }                                               \
2462}
2463
2464static inline void netif_tx_disable(struct net_device *dev)
2465{
2466        unsigned int i;
2467        int cpu;
2468
2469        local_bh_disable();
2470        cpu = smp_processor_id();
2471        for (i = 0; i < dev->num_tx_queues; i++) {
2472                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2473
2474                __netif_tx_lock(txq, cpu);
2475                netif_tx_stop_queue(txq);
2476                __netif_tx_unlock(txq);
2477        }
2478        local_bh_enable();
2479}
2480
2481static inline void netif_addr_lock(struct net_device *dev)
2482{
2483        spin_lock(&dev->addr_list_lock);
2484}
2485
2486static inline void netif_addr_lock_nested(struct net_device *dev)
2487{
2488        spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2489}
2490
2491static inline void netif_addr_lock_bh(struct net_device *dev)
2492{
2493        spin_lock_bh(&dev->addr_list_lock);
2494}
2495
2496static inline void netif_addr_unlock(struct net_device *dev)
2497{
2498        spin_unlock(&dev->addr_list_lock);
2499}
2500
2501static inline void netif_addr_unlock_bh(struct net_device *dev)
2502{
2503        spin_unlock_bh(&dev->addr_list_lock);
2504}
2505
2506/*
2507 * dev_addrs walker. Should be used only for read access. Call with
2508 * rcu_read_lock held.
2509 */
2510#define for_each_dev_addr(dev, ha) \
2511                list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
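
/*
 * Illustrative sketch (not part of this header): dumping a device's
 * address list with the walker above, under rcu_read_lock() as
 * required.
 */
static inline void example_dump_addrs(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        rcu_read_lock();
        for_each_dev_addr(dev, ha)
                pr_info("%s: %pM\n", dev->name, ha->addr);
        rcu_read_unlock();
}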
2512
2513/* These functions live elsewhere (drivers/net/net_init.c, but related) */
2514
2515extern void             ether_setup(struct net_device *dev);
2516
2517/* Support for loadable net-drivers */
2518extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2519                                       void (*setup)(struct net_device *),
2520                                       unsigned int txqs, unsigned int rxqs);
2521#define alloc_netdev(sizeof_priv, name, setup) \
2522        alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2523
2524#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2525        alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2526
2527extern int              register_netdev(struct net_device *dev);
2528extern void             unregister_netdev(struct net_device *dev);
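
/*
 * Illustrative sketch (not part of this header): the usual allocate /
 * register / tear-down lifecycle for an Ethernet-style device.
 * "struct example_drv_priv" and the "ex%d" name template are
 * hypothetical.
 */
struct example_drv_priv {
        int placeholder;
};

static inline struct net_device *example_create(void)
{
        struct net_device *dev;

        dev = alloc_netdev(sizeof(struct example_drv_priv), "ex%d",
                           ether_setup);
        if (!dev)
                return NULL;

        if (register_netdev(dev)) {
                free_netdev(dev);
                return NULL;
        }
        return dev;
}

static inline void example_destroy(struct net_device *dev)
{
        unregister_netdev(dev);
        free_netdev(dev);       /* release the allocation */
}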
2529
2530/* General hardware address lists handling functions */
2531extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2532                                  struct netdev_hw_addr_list *from_list,
2533                                  int addr_len, unsigned char addr_type);
2534extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2535                                   struct netdev_hw_addr_list *from_list,
2536                                   int addr_len, unsigned char addr_type);
2537extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2538                          struct netdev_hw_addr_list *from_list,
2539                          int addr_len);
2540extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2541                             struct netdev_hw_addr_list *from_list,
2542                             int addr_len);
2543extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2544extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2545
2546/* Functions used for device addresses handling */
2547extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
2548                        unsigned char addr_type);
2549extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
2550                        unsigned char addr_type);
2551extern int dev_addr_add_multiple(struct net_device *to_dev,
2552                                 struct net_device *from_dev,
2553                                 unsigned char addr_type);
2554extern int dev_addr_del_multiple(struct net_device *to_dev,
2555                                 struct net_device *from_dev,
2556                                 unsigned char addr_type);
2557extern void dev_addr_flush(struct net_device *dev);
2558extern int dev_addr_init(struct net_device *dev);
2559
2560/* Functions used for unicast addresses handling */
2561extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2562extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2563extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
2564extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2565extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2566extern void dev_uc_flush(struct net_device *dev);
2567extern void dev_uc_init(struct net_device *dev);
2568
2569/* Functions used for multicast addresses handling */
2570extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2571extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2572extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2573extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2574extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
2575extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2576extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2577extern void dev_mc_flush(struct net_device *dev);
2578extern void dev_mc_init(struct net_device *dev);
2579
2580/* Functions used for secondary unicast and multicast support */
2581extern void             dev_set_rx_mode(struct net_device *dev);
2582extern void             __dev_set_rx_mode(struct net_device *dev);
2583extern int              dev_set_promiscuity(struct net_device *dev, int inc);
2584extern int              dev_set_allmulti(struct net_device *dev, int inc);
2585extern void             netdev_state_change(struct net_device *dev);
2586extern void             netdev_notify_peers(struct net_device *dev);
2587extern void             netdev_features_change(struct net_device *dev);
2588/* Load a device via the kmod */
2589extern void             dev_load(struct net *net, const char *name);
2590extern void             dev_mcast_init(void);
2591extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2592                                               struct rtnl_link_stats64 *storage);
2593extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2594                                    const struct net_device_stats *netdev_stats);
2595
2596extern int              netdev_max_backlog;
2597extern int              netdev_tstamp_prequeue;
2598extern int              weight_p;
2599extern int              bpf_jit_enable;
2600extern int              netdev_set_master(struct net_device *dev, struct net_device *master);
2601extern int netdev_set_bond_master(struct net_device *dev,
2602                                  struct net_device *master);
2603extern int skb_checksum_help(struct sk_buff *skb);
2604extern struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2605        netdev_features_t features);
2606#ifdef CONFIG_BUG
2607extern void netdev_rx_csum_fault(struct net_device *dev);
2608#else
2609static inline void netdev_rx_csum_fault(struct net_device *dev)
2610{
2611}
2612#endif
2613/* rx skb timestamps */
2614extern void             net_enable_timestamp(void);
2615extern void             net_disable_timestamp(void);
2616
2617#ifdef CONFIG_PROC_FS
2618extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2619extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2620extern void dev_seq_stop(struct seq_file *seq, void *v);
2621#endif
2622
2623extern int netdev_class_create_file(struct class_attribute *class_attr);
2624extern void netdev_class_remove_file(struct class_attribute *class_attr);
2625
2626extern struct kobj_ns_type_operations net_ns_type_operations;
2627
2628extern const char *netdev_drivername(const struct net_device *dev);
2629
2630extern void linkwatch_run_queue(void);
2631
2632static inline netdev_features_t netdev_get_wanted_features(
2633        struct net_device *dev)
2634{
2635        return (dev->features & ~dev->hw_features) | dev->wanted_features;
2636}
2637netdev_features_t netdev_increment_features(netdev_features_t all,
2638        netdev_features_t one, netdev_features_t mask);
2639int __netdev_update_features(struct net_device *dev);
2640void netdev_update_features(struct net_device *dev);
2641void netdev_change_features(struct net_device *dev);
2642
2643void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2644                                        struct net_device *dev);
2645
2646netdev_features_t netif_skb_features(struct sk_buff *skb);
2647
2648static inline bool net_gso_ok(netdev_features_t features, int gso_type)
2649{
2650        netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
2651
2652        /* check flags correspondence */
2653        BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
2654        BUILD_BUG_ON(SKB_GSO_UDP     != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
2655        BUILD_BUG_ON(SKB_GSO_DODGY   != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
2656        BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
2657        BUILD_BUG_ON(SKB_GSO_TCPV6   != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
2658        BUILD_BUG_ON(SKB_GSO_FCOE    != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
2659
2660        return (features & feature) == feature;
2661}
2662
2663static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
2664{
2665        return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2666               (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2667}
2668
2669static inline bool netif_needs_gso(struct sk_buff *skb,
2670                                   netdev_features_t features)
2671{
2672        return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2673                unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
2674                         (skb->ip_summed != CHECKSUM_UNNECESSARY)));
2675}
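
/*
 * Illustrative sketch (not part of this header): the decision the core
 * xmit path makes with the helpers above.  If the device cannot handle
 * this GSO skb, segment it in software first; skb_gso_segment()
 * returns a list of segments (or an ERR_PTR).
 */
static inline struct sk_buff *example_maybe_segment(struct sk_buff *skb,
                                                    netdev_features_t features)
{
        if (netif_needs_gso(skb, features))
                return skb_gso_segment(skb, features);
        return skb;
}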
2676
2677static inline void netif_set_gso_max_size(struct net_device *dev,
2678                                          unsigned int size)
2679{
2680        dev->gso_max_size = size;
2681}
2682
2683static inline bool netif_is_bond_slave(struct net_device *dev)
2684{
2685        return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
2686}
2687
2688static inline bool netif_supports_nofcs(struct net_device *dev)
2689{
2690        return dev->priv_flags & IFF_SUPP_NOFCS;
2691}
2692
2693extern struct pernet_operations __net_initdata loopback_net_ops;
2694
2695/* Logging, debugging and troubleshooting/diagnostic helpers. */
2696
2697/* netdev_printk helpers, similar to dev_printk */
2698
2699static inline const char *netdev_name(const struct net_device *dev)
2700{
2701        if (dev->reg_state != NETREG_REGISTERED)
2702                return "(unregistered net_device)";
2703        return dev->name;
2704}
2705
2706extern __printf(3, 4)
2707int netdev_printk(const char *level, const struct net_device *dev,
2708                  const char *format, ...);
2709extern __printf(2, 3)
2710int netdev_emerg(const struct net_device *dev, const char *format, ...);
2711extern __printf(2, 3)
2712int netdev_alert(const struct net_device *dev, const char *format, ...);
2713extern __printf(2, 3)
2714int netdev_crit(const struct net_device *dev, const char *format, ...);
2715extern __printf(2, 3)
2716int netdev_err(const struct net_device *dev, const char *format, ...);
2717extern __printf(2, 3)
2718int netdev_warn(const struct net_device *dev, const char *format, ...);
2719extern __printf(2, 3)
2720int netdev_notice(const struct net_device *dev, const char *format, ...);
2721extern __printf(2, 3)
2722int netdev_info(const struct net_device *dev, const char *format, ...);
2723
2724#define MODULE_ALIAS_NETDEV(device) \
2725        MODULE_ALIAS("netdev-" device)
2726
2727#if defined(CONFIG_DYNAMIC_DEBUG)
2728#define netdev_dbg(__dev, format, args...)                      \
2729do {                                                            \
2730        dynamic_netdev_dbg(__dev, format, ##args);              \
2731} while (0)
2732#elif defined(DEBUG)
2733#define netdev_dbg(__dev, format, args...)                      \
2734        netdev_printk(KERN_DEBUG, __dev, format, ##args)
2735#else
2736#define netdev_dbg(__dev, format, args...)                      \
2737({                                                              \
2738        if (0)                                                  \
2739                netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2740        0;                                                      \
2741})
2742#endif
2743
2744#if defined(VERBOSE_DEBUG)
2745#define netdev_vdbg     netdev_dbg
2746#else
2747
2748#define netdev_vdbg(dev, format, args...)                       \
2749({                                                              \
2750        if (0)                                                  \
2751                netdev_printk(KERN_DEBUG, dev, format, ##args); \
2752        0;                                                      \
2753})
2754#endif
2755
2756/*
2757 * netdev_WARN() acts like dev_printk(), but with the key difference
2758 * of using a WARN/WARN_ON to get the message out, including the
2759 * file/line information and a backtrace.
2760 */
2761#define netdev_WARN(dev, format, args...)                       \
2762        WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
2763
2764/* netif printk helpers, similar to netdev_printk */
2765
2766#define netif_printk(priv, type, level, dev, fmt, args...)      \
2767do {                                                            \
2768        if (netif_msg_##type(priv))                             \
2769                netdev_printk(level, (dev), fmt, ##args);       \
2770} while (0)
2771
2772#define netif_level(level, priv, type, dev, fmt, args...)       \
2773do {                                                            \
2774        if (netif_msg_##type(priv))                             \
2775                netdev_##level(dev, fmt, ##args);               \
2776} while (0)
2777
2778#define netif_emerg(priv, type, dev, fmt, args...)              \
2779        netif_level(emerg, priv, type, dev, fmt, ##args)
2780#define netif_alert(priv, type, dev, fmt, args...)              \
2781        netif_level(alert, priv, type, dev, fmt, ##args)
2782#define netif_crit(priv, type, dev, fmt, args...)               \
2783        netif_level(crit, priv, type, dev, fmt, ##args)
2784#define netif_err(priv, type, dev, fmt, args...)                \
2785        netif_level(err, priv, type, dev, fmt, ##args)
2786#define netif_warn(priv, type, dev, fmt, args...)               \
2787        netif_level(warn, priv, type, dev, fmt, ##args)
2788#define netif_notice(priv, type, dev, fmt, args...)             \
2789        netif_level(notice, priv, type, dev, fmt, ##args)
2790#define netif_info(priv, type, dev, fmt, args...)               \
2791        netif_level(info, priv, type, dev, fmt, ##args)
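
/*
 * Illustrative sketch (not part of this header): message-level-gated
 * logging.  The priv structure only needs a msg_enable field for the
 * netif_msg_*() tests to work; all names here are hypothetical.
 */
struct example_log_priv {
        u32 msg_enable;
};

static inline void example_log_link_up(struct example_log_priv *priv,
                                       struct net_device *dev)
{
        netif_info(priv, link, dev, "link is up\n");
}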
2792
2793#if defined(CONFIG_DYNAMIC_DEBUG)
2794#define netif_dbg(priv, type, netdev, format, args...)          \
2795do {                                                            \
2796        if (netif_msg_##type(priv))                             \
2797                dynamic_netdev_dbg(netdev, format, ##args);     \
2798} while (0)
2799#elif defined(DEBUG)
2800#define netif_dbg(priv, type, dev, format, args...)             \
2801        netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2802#else
2803#define netif_dbg(priv, type, dev, format, args...)                     \
2804({                                                                      \
2805        if (0)                                                          \
2806                netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2807        0;                                                              \
2808})
2809#endif
2810
2811#if defined(VERBOSE_DEBUG)
2812#define netif_vdbg      netif_dbg
2813#else
2814#define netif_vdbg(priv, type, dev, format, args...)            \
2815({                                                              \
2816        if (0)                                                  \
2817                netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2818        0;                                                      \
2819})
2820#endif
2821
2822#endif  /* _LINUX_NETDEVICE_H */
2823