linux/include/linux/netdevice.h
   1/*
   2 * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3 *              operating system.  INET is implemented using the  BSD Socket
   4 *              interface as the means of communication with the user level.
   5 *
   6 *              Definitions for the Interfaces handler.
   7 *
   8 * Version:     @(#)dev.h       1.0.10  08/12/93
   9 *
  10 * Authors:     Ross Biro
  11 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
  13 *              Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
  14 *              Alan Cox, <Alan.Cox@linux.org>
  15 *              Bjorn Ekwall. <bj0rn@blox.se>
  16 *              Pekka Riikonen <priikone@poseidon.pspt.fi>
  17 *
  18 *              This program is free software; you can redistribute it and/or
  19 *              modify it under the terms of the GNU General Public License
  20 *              as published by the Free Software Foundation; either version
  21 *              2 of the License, or (at your option) any later version.
  22 *
  23 *              Moved to /usr/include/linux for NET3
  24 */
  25#ifndef _LINUX_NETDEVICE_H
  26#define _LINUX_NETDEVICE_H
  27
  28#include <linux/if.h>
  29#include <linux/if_ether.h>
  30#include <linux/if_packet.h>
  31
  32#ifdef __KERNEL__
  33#include <linux/timer.h>
  34#include <asm/atomic.h>
  35#include <asm/cache.h>
  36#include <asm/byteorder.h>
  37
  38#include <linux/device.h>
  39#include <linux/percpu.h>
  40#include <linux/dmaengine.h>
  41
  42struct vlan_group;
  43struct ethtool_ops;
  44struct netpoll_info;
  45                                        /* source back-compat hooks */
  46#define SET_ETHTOOL_OPS(netdev,ops) \
  47        ( (netdev)->ethtool_ops = (ops) )
  48
  49#define HAVE_ALLOC_NETDEV               /* feature macro: alloc_xxxdev
  50                                           functions are available. */
  51#define HAVE_FREE_NETDEV                /* free_netdev() */
  52#define HAVE_NETDEV_PRIV                /* netdev_priv() */
  53
  54#define NET_XMIT_SUCCESS        0
  55#define NET_XMIT_DROP           1       /* skb dropped                  */
  56#define NET_XMIT_CN             2       /* congestion notification      */
  57#define NET_XMIT_POLICED        3       /* skb is shot by police        */
  58#define NET_XMIT_BYPASS         4       /* packet does not leave via dequeue;
  59                                           (TC use only - dev_queue_xmit
  60                                           returns this as NET_XMIT_SUCCESS) */
  61
  62/* Backlog congestion levels */
  63#define NET_RX_SUCCESS          0   /* keep 'em coming, baby */
  64#define NET_RX_DROP             1  /* packet dropped */
  65#define NET_RX_CN_LOW           2   /* storm alert, just in case */
  66#define NET_RX_CN_MOD           3   /* Storm on its way! */
  67#define NET_RX_CN_HIGH          4   /* The storm is here */
  68#define NET_RX_BAD              5  /* packet dropped due to kernel error */
  69
  70/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  71 * indicates that the device will soon be dropping packets, or already drops
  72 * some packets of the same priority; prompting us to send less aggressively. */
  73#define net_xmit_eval(e)        ((e) == NET_XMIT_CN? 0 : (e))
  74#define net_xmit_errno(e)       ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
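/*
 * Illustrative sketch (not part of the original header): how a caller of
 * dev_queue_xmit() typically interprets these codes, similar to the pattern
 * used in the IP/UDP output paths.  NET_XMIT_CN is folded into success;
 * other positive codes are turned into -ENOBUFS.  example_xmit_one() is a
 * hypothetical name.
 */
#if 0	/* illustrative only */
static int example_xmit_one(struct sk_buff *skb)
{
	int err = dev_queue_xmit(skb);	/* consumes the skb in all cases */

	if (err > 0)
		err = net_xmit_errno(err);	/* CN -> 0, real drop -> -ENOBUFS */
	return err;
}
#endif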
  75
  76#endif
  77
  78#define MAX_ADDR_LEN    32              /* Largest hardware address length */
  79
  80/* Driver transmit return codes */
  81#define NETDEV_TX_OK 0          /* driver took care of packet */
  82#define NETDEV_TX_BUSY 1        /* driver tx path was busy */
  83#define NETDEV_TX_LOCKED -1     /* driver tx lock was already taken */
  84
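/*
 * Illustrative sketch (not part of the original header): the contract a
 * driver's hard_start_xmit() implements with these codes.  The mydrv_*
 * helpers and private structure are hypothetical.
 */
#if 0	/* illustrative only */
static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (mydrv_tx_ring_full(priv)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb is not freed; core will requeue it */
	}

	mydrv_post_to_tx_ring(priv, skb);
	dev->trans_start = jiffies;

	if (mydrv_tx_ring_full(priv))
		netif_stop_queue(dev);	/* stop before the ring overflows */

	return NETDEV_TX_OK;		/* driver now owns the skb */
}
#endif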
  85/*
  86 *      Compute the worst case header length according to the protocols
  87 *      used.
  88 */
  89 
  90#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
  91#define LL_MAX_HEADER   32
  92#else
  93#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
  94#define LL_MAX_HEADER   96
  95#else
  96#define LL_MAX_HEADER   48
  97#endif
  98#endif
  99
 100#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
 101    !defined(CONFIG_NET_IPGRE) &&  !defined(CONFIG_NET_IPGRE_MODULE) && \
 102    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
 103    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
 104#define MAX_HEADER LL_MAX_HEADER
 105#else
 106#define MAX_HEADER (LL_MAX_HEADER + 48)
 107#endif
 108
 109/*
 110 *      Network device statistics. Akin to the 2.0 ether stats but
 111 *      with byte counters.
 112 */
 113 
 114struct net_device_stats
 115{
 116        unsigned long   rx_packets;             /* total packets received       */
 117        unsigned long   tx_packets;             /* total packets transmitted    */
 118        unsigned long   rx_bytes;               /* total bytes received         */
 119        unsigned long   tx_bytes;               /* total bytes transmitted      */
 120        unsigned long   rx_errors;              /* bad packets received         */
 121        unsigned long   tx_errors;              /* packet transmit problems     */
 122        unsigned long   rx_dropped;             /* no space in linux buffers    */
 123        unsigned long   tx_dropped;             /* no space available in linux  */
 124        unsigned long   multicast;              /* multicast packets received   */
 125        unsigned long   collisions;
 126
 127        /* detailed rx_errors: */
 128        unsigned long   rx_length_errors;
 129        unsigned long   rx_over_errors;         /* receiver ring buff overflow  */
 130        unsigned long   rx_crc_errors;          /* recved pkt with crc error    */
 131        unsigned long   rx_frame_errors;        /* recv'd frame alignment error */
 132        unsigned long   rx_fifo_errors;         /* recv'r fifo overrun          */
 133        unsigned long   rx_missed_errors;       /* receiver missed packet       */
 134
 135        /* detailed tx_errors */
 136        unsigned long   tx_aborted_errors;
 137        unsigned long   tx_carrier_errors;
 138        unsigned long   tx_fifo_errors;
 139        unsigned long   tx_heartbeat_errors;
 140        unsigned long   tx_window_errors;
 141        
 142        /* for cslip etc */
 143        unsigned long   rx_compressed;
 144        unsigned long   tx_compressed;
 145};
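
/*
 * Illustrative sketch (not part of the original header): drivers normally
 * embed this structure in their private data, update the counters from the
 * RX/TX paths and return it from dev->get_stats().  mydrv_* names are
 * hypothetical.
 */
#if 0	/* illustrative only */
struct mydrv_priv {
	struct net_device_stats stats;
	/* ... device state ... */
};

static struct net_device_stats *mydrv_get_stats(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	return &priv->stats;	/* e.g. priv->stats.rx_packets++ on receive */
}
#endif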
 146
 147
 148/* Media selection options. */
 149enum {
 150        IF_PORT_UNKNOWN = 0,
 151        IF_PORT_10BASE2,
 152        IF_PORT_10BASET,
 153        IF_PORT_AUI,
 154        IF_PORT_100BASET,
 155        IF_PORT_100BASETX,
 156        IF_PORT_100BASEFX
 157};
 158
 159#ifdef __KERNEL__
 160
 161#include <linux/cache.h>
 162#include <linux/skbuff.h>
 163
 164struct neighbour;
 165struct neigh_parms;
 166struct sk_buff;
 167
 168struct netif_rx_stats
 169{
 170        unsigned total;
 171        unsigned dropped;
 172        unsigned time_squeeze;
 173        unsigned cpu_collision;
 174};
 175
 176DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
 177
 178
 179/*
 180 *      We tag multicasts with these structures.
 181 */
 182 
 183struct dev_mc_list
 184{       
 185        struct dev_mc_list      *next;
 186        __u8                    dmi_addr[MAX_ADDR_LEN];
 187        unsigned char           dmi_addrlen;
 188        int                     dmi_users;
 189        int                     dmi_gusers;
 190};
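
/*
 * Illustrative sketch (not part of the original header): a driver's
 * set_multicast_list() hook usually walks dev->mc_list to program its
 * hardware filter; the core normally invokes it with netif_tx_lock held.
 * mydrv_add_hw_filter() is a hypothetical helper.
 */
#if 0	/* illustrative only */
static void mydrv_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *dmi;

	for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next)
		mydrv_add_hw_filter(dev, dmi->dmi_addr, dmi->dmi_addrlen);
}
#endif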
 191
 192struct hh_cache
 193{
 194        struct hh_cache *hh_next;       /* Next entry                        */
 195        atomic_t        hh_refcnt;      /* number of users                   */
 196/*
 197 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 198 * cache line on SMP.
 199 * They are mostly read, but hh_refcnt may be changed quite frequently,
 200 * incurring cache line ping pongs.
 201 */
 202        __be16          hh_type ____cacheline_aligned_in_smp;
 203                                        /* protocol identifier, e.g. ETH_P_IP
 204                                         *  NOTE:  For VLANs, this will be the
 205                                         *  encapsulated type. --BLG
 206                                         */
 207        u16             hh_len;         /* length of header */
 208        int             (*hh_output)(struct sk_buff *skb);
 209        seqlock_t       hh_lock;
 210
 211        /* cached hardware header; allow for machine alignment needs.        */
 212#define HH_DATA_MOD     16
 213#define HH_DATA_OFF(__len) \
 214        (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
 215#define HH_DATA_ALIGN(__len) \
 216        (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
 217        unsigned long   hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
 218};
 219
 220/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 221 * Alternative is:
 222 *   dev->hard_header_len ? (dev->hard_header_len +
 223 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 224 *
 225 * We could use other alignment values, but we must maintain the
 226 * relationship HH alignment <= LL alignment.
 227 */
 228#define LL_RESERVED_SPACE(dev) \
 229        (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
 230#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
 231        ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
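
/*
 * Illustrative sketch (not part of the original header): protocol code that
 * builds its own frames reserves link-layer headroom with these macros (the
 * same pattern the ARP code uses).  example_alloc_frame() is hypothetical.
 */
#if 0	/* illustrative only */
static struct sk_buff *example_alloc_frame(struct net_device *dev, int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(payload_len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for dev->hard_header() */
	return skb;
}
#endif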
 232
 233/* These flag bits are private to the generic network queueing
 234 * layer, they may not be explicitly referenced by any other
 235 * code.
 236 */
 237
 238enum netdev_state_t
 239{
 240        __LINK_STATE_XOFF=0,
 241        __LINK_STATE_START,
 242        __LINK_STATE_PRESENT,
 243        __LINK_STATE_SCHED,
 244        __LINK_STATE_NOCARRIER,
 245        __LINK_STATE_RX_SCHED,
 246        __LINK_STATE_LINKWATCH_PENDING,
 247        __LINK_STATE_DORMANT,
 248        __LINK_STATE_QDISC_RUNNING,
 249};
 250
 251
 252/*
 253 * This structure holds the netdevice settings configured at boot time. They
 254 * are then used during device probing.
 255 */
 256struct netdev_boot_setup {
 257        char name[IFNAMSIZ];
 258        struct ifmap map;
 259};
 260#define NETDEV_BOOT_SETUP_MAX 8
 261
 262extern int __init netdev_boot_setup(char *str);
 263
 264/*
 265 *      The DEVICE structure.
 266 *      Actually, this whole structure is a big mistake.  It mixes I/O
 267 *      data with strictly "high-level" data, and it has to know about
 268 *      almost every data structure used in the INET module.
 269 *
 270 *      FIXME: cleanup struct net_device such that network protocol info
 271 *      moves out.
 272 */
 273
 274struct net_device
 275{
 276
 277        /*
 278         * This is the first field of the "visible" part of this structure
 279         * (i.e. as seen by users in the "Space.c" file).  It is the name
 280         * of the interface.
 281         */
 282        char                    name[IFNAMSIZ];
 283        /* device name hash chain */
 284        struct hlist_node       name_hlist;
 285
 286        /*
 287         *      I/O specific fields
 288         *      FIXME: Merge these and struct ifmap into one
 289         */
 290        unsigned long           mem_end;        /* shared mem end       */
 291        unsigned long           mem_start;      /* shared mem start     */
 292        unsigned long           base_addr;      /* device I/O address   */
 293        unsigned int            irq;            /* device IRQ number    */
 294
 295        /*
 296         *      Some hardware also needs these fields, but they are not
 297         *      part of the usual set specified in Space.c.
 298         */
 299
 300        unsigned char           if_port;        /* Selectable AUI, TP,..*/
 301        unsigned char           dma;            /* DMA channel          */
 302
 303        unsigned long           state;
 304
 305        struct net_device       *next;
 306        
 307        /* The device initialization function. Called only once. */
 308        int                     (*init)(struct net_device *dev);
 309
 310        /* ------- Fields preinitialized in Space.c finish here ------- */
 311
 312        /* Net device features */
 313        unsigned long           features;
 314#define NETIF_F_SG              1       /* Scatter/gather IO. */
 315#define NETIF_F_IP_CSUM         2       /* Can checksum only TCP/UDP over IPv4. */
 316#define NETIF_F_NO_CSUM         4       /* Does not require checksum. E.g. loopback. */
 317#define NETIF_F_HW_CSUM         8       /* Can checksum all the packets. */
 318#define NETIF_F_HIGHDMA         32      /* Can DMA to high memory. */
 319#define NETIF_F_FRAGLIST        64      /* Scatter/gather IO. */
 320#define NETIF_F_HW_VLAN_TX      128     /* Transmit VLAN hw acceleration */
 321#define NETIF_F_HW_VLAN_RX      256     /* Receive VLAN hw acceleration */
 322#define NETIF_F_HW_VLAN_FILTER  512     /* Receive filtering on VLAN */
 323#define NETIF_F_VLAN_CHALLENGED 1024    /* Device cannot handle VLAN packets */
 324#define NETIF_F_GSO             2048    /* Enable software GSO. */
 325#define NETIF_F_LLTX            4096    /* LockLess TX */
 326
 327        /* Segmentation offload features */
 328#define NETIF_F_GSO_SHIFT       16
 329#define NETIF_F_GSO_MASK        0xffff0000
 330#define NETIF_F_TSO             (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
 331#define NETIF_F_UFO             (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
 332#define NETIF_F_GSO_ROBUST      (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
 333#define NETIF_F_TSO_ECN         (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
 334#define NETIF_F_TSO6            (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 335
 336        /* List of features with software fallbacks. */
 337#define NETIF_F_GSO_SOFTWARE    (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
 338
 339#define NETIF_F_GEN_CSUM        (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
 340#define NETIF_F_ALL_CSUM        (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 341
 342        struct net_device       *next_sched;
 343
 344        /* Interface index. Unique device identifier    */
 345        int                     ifindex;
 346        int                     iflink;
 347
 348
 349        struct net_device_stats* (*get_stats)(struct net_device *dev);
 350
 351        /* List of functions to handle Wireless Extensions (instead of ioctl).
 352         * See <net/iw_handler.h> for details. Jean II */
 353        const struct iw_handler_def *   wireless_handlers;
 354        /* Instance data managed by the core of Wireless Extensions. */
 355        struct iw_public_data * wireless_data;
 356
 357        const struct ethtool_ops *ethtool_ops;
 358
 359        /*
 360         * This marks the end of the "visible" part of the structure. All
 361         * fields hereafter are internal to the system, and may change at
 362         * will (read: may be cleaned up at will).
 363         */
 364
 365
 366        unsigned int            flags;  /* interface flags (a la BSD)   */
 367        unsigned short          gflags;
 368        unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
 369        unsigned short          padded; /* How much padding added by alloc_netdev() */
 370
 371        unsigned char           operstate; /* RFC2863 operstate */
 372        unsigned char           link_mode; /* mapping policy to operstate */
 373
 374        unsigned                mtu;    /* interface MTU value          */
 375        unsigned short          type;   /* interface hardware type      */
 376        unsigned short          hard_header_len;        /* hardware hdr length  */
 377
 378        struct net_device       *master; /* Pointer to master device of a group,
 379                                          * which this device is member of.
 380                                          */
 381
 382        /* Interface address info. */
 383        unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
 384        unsigned char           addr_len;       /* hardware address length      */
 385        unsigned short          dev_id;         /* for shared network cards */
 386
 387        struct dev_mc_list      *mc_list;       /* Multicast mac addresses      */
 388        int                     mc_count;       /* Number of installed mcasts   */
 389        int                     promiscuity;
 390        int                     allmulti;
 391
 392
 393        /* Protocol specific pointers */
 394        
 395        void                    *atalk_ptr;     /* AppleTalk link       */
 396        void                    *ip_ptr;        /* IPv4 specific data   */  
 397        void                    *dn_ptr;        /* DECnet specific data */
 398        void                    *ip6_ptr;       /* IPv6 specific data */
 399        void                    *ec_ptr;        /* Econet specific data */
 400        void                    *ax25_ptr;      /* AX.25 specific data */
 401
 402/*
 403 * Cache line mostly used on receive path (including eth_type_trans())
 404 */
 405        struct list_head        poll_list ____cacheline_aligned_in_smp;
 406                                        /* Link to poll list    */
 407
 408        int                     (*poll) (struct net_device *dev, int *quota);
 409        int                     quota;
 410        int                     weight;
 411        unsigned long           last_rx;        /* Time of last Rx      */
 412        /* Interface address info used in eth_type_trans() */
 413        unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address (before bcast
 414                                                        because most packets are unicast) */
 415
 416        unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
 417
 418/*
 419 * Cache line mostly used on queue transmit path (qdisc)
 420 */
 421        /* device queue lock */
 422        spinlock_t              queue_lock ____cacheline_aligned_in_smp;
 423        struct Qdisc            *qdisc;
 424        struct Qdisc            *qdisc_sleeping;
 425        struct list_head        qdisc_list;
 426        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
 427
 428        /* Partially transmitted GSO packet. */
 429        struct sk_buff          *gso_skb;
 430
 431        /* ingress path synchronizer */
 432        spinlock_t              ingress_lock;
 433        struct Qdisc            *qdisc_ingress;
 434
 435/*
 436 * One part is mostly used on xmit path (device)
 437 */
 438        /* hard_start_xmit synchronizer */
 439        spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
 440        /* cpu id of the processor currently inside hard_start_xmit,
 441           or -1 if nobody is in there.
 442         */
 443        int                     xmit_lock_owner;
 444        void                    *priv;  /* pointer to private data      */
 445        int                     (*hard_start_xmit) (struct sk_buff *skb,
 446                                                    struct net_device *dev);
 447        /* These may be needed for future network-power-down code. */
 448        unsigned long           trans_start;    /* Time (in jiffies) of last Tx */
 449
 450        int                     watchdog_timeo; /* used by dev_watchdog() */
 451        struct timer_list       watchdog_timer;
 452
 453/*
 454 * refcnt is a very hot point, so align it on SMP
 455 */
 456        /* Number of references to this device */
 457        atomic_t                refcnt ____cacheline_aligned_in_smp;
 458
 459        /* delayed register/unregister */
 460        struct list_head        todo_list;
 461        /* device index hash chain */
 462        struct hlist_node       index_hlist;
 463
 464        /* register/unregister state machine */
 465        enum { NETREG_UNINITIALIZED=0,
 466               NETREG_REGISTERED,       /* completed register_netdevice */
 467               NETREG_UNREGISTERING,    /* called unregister_netdevice */
 468               NETREG_UNREGISTERED,     /* completed unregister todo */
 469               NETREG_RELEASED,         /* called free_netdev */
 470        } reg_state;
 471
 472        /* Called after device is detached from network. */
 473        void                    (*uninit)(struct net_device *dev);
 474        /* Called after last user reference disappears. */
 475        void                    (*destructor)(struct net_device *dev);
 476
 477        /* Pointers to interface service routines.      */
 478        int                     (*open)(struct net_device *dev);
 479        int                     (*stop)(struct net_device *dev);
 480#define HAVE_NETDEV_POLL
 481        int                     (*hard_header) (struct sk_buff *skb,
 482                                                struct net_device *dev,
 483                                                unsigned short type,
 484                                                void *daddr,
 485                                                void *saddr,
 486                                                unsigned len);
 487        int                     (*rebuild_header)(struct sk_buff *skb);
 488#define HAVE_MULTICAST                   
 489        void                    (*set_multicast_list)(struct net_device *dev);
 490#define HAVE_SET_MAC_ADDR                
 491        int                     (*set_mac_address)(struct net_device *dev,
 492                                                   void *addr);
 493#define HAVE_PRIVATE_IOCTL
 494        int                     (*do_ioctl)(struct net_device *dev,
 495                                            struct ifreq *ifr, int cmd);
 496#define HAVE_SET_CONFIG
 497        int                     (*set_config)(struct net_device *dev,
 498                                              struct ifmap *map);
 499#define HAVE_HEADER_CACHE
 500        int                     (*hard_header_cache)(struct neighbour *neigh,
 501                                                     struct hh_cache *hh);
 502        void                    (*header_cache_update)(struct hh_cache *hh,
 503                                                       struct net_device *dev,
 504                                                       unsigned char *  haddr);
 505#define HAVE_CHANGE_MTU
 506        int                     (*change_mtu)(struct net_device *dev, int new_mtu);
 507
 508#define HAVE_TX_TIMEOUT
 509        void                    (*tx_timeout) (struct net_device *dev);
 510
 511        void                    (*vlan_rx_register)(struct net_device *dev,
 512                                                    struct vlan_group *grp);
 513        void                    (*vlan_rx_add_vid)(struct net_device *dev,
 514                                                   unsigned short vid);
 515        void                    (*vlan_rx_kill_vid)(struct net_device *dev,
 516                                                    unsigned short vid);
 517
 518        int                     (*hard_header_parse)(struct sk_buff *skb,
 519                                                     unsigned char *haddr);
 520        int                     (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
 521#ifdef CONFIG_NETPOLL
 522        struct netpoll_info     *npinfo;
 523#endif
 524#ifdef CONFIG_NET_POLL_CONTROLLER
 525        void                    (*poll_controller)(struct net_device *dev);
 526#endif
 527
 528        /* bridge stuff */
 529        struct net_bridge_port  *br_port;
 530
 531        /* class/net/name entry */
 532        struct class_device     class_dev;
 533        /* space for optional statistics and wireless sysfs groups */
 534        struct attribute_group  *sysfs_groups[3];
 535};
 536
 537#define NETDEV_ALIGN            32
 538#define NETDEV_ALIGN_CONST      (NETDEV_ALIGN - 1)
 539
 540static inline void *netdev_priv(struct net_device *dev)
 541{
 542        return (char *)dev + ((sizeof(struct net_device)
 543                                        + NETDEV_ALIGN_CONST)
 544                                & ~NETDEV_ALIGN_CONST);
 545}
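
/*
 * Illustrative sketch (not part of the original header): the private area
 * requested through alloc_netdev() lives directly after the (aligned)
 * struct net_device, and netdev_priv() is the supported way to reach it.
 * The mydrv_priv layout and example_setup() are hypothetical.
 */
#if 0	/* illustrative only */
struct mydrv_priv {
	spinlock_t	lock;
	u32		msg_enable;
	/* ... */
};

static struct net_device *example_alloc(void)
{
	struct net_device *dev;
	struct mydrv_priv *priv;

	dev = alloc_netdev(sizeof(struct mydrv_priv), "example%d", example_setup);
	if (dev == NULL)
		return NULL;
	priv = netdev_priv(dev);	/* memory immediately after *dev */
	spin_lock_init(&priv->lock);
	return dev;
}
#endif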
 546
 547#define SET_MODULE_OWNER(dev) do { } while (0)
 548/* Set the sysfs physical device reference for the network logical device.
 549 * If set prior to registration, a symlink is created during initialization.
 550 */
 551#define SET_NETDEV_DEV(net, pdev)       ((net)->class_dev.dev = (pdev))
 552
 553struct packet_type {
 554        __be16                  type;   /* This is really htons(ether_type). */
 555        struct net_device       *dev;   /* NULL is wildcarded here           */
 556        int                     (*func) (struct sk_buff *,
 557                                         struct net_device *,
 558                                         struct packet_type *,
 559                                         struct net_device *);
 560        struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
 561                                                int features);
 562        int                     (*gso_send_check)(struct sk_buff *skb);
 563        void                    *af_packet_priv;
 564        struct list_head        list;
 565};
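
/*
 * Illustrative sketch (not part of the original header): registering a
 * protocol handler.  0x88b5 is just an experimental ethertype chosen for
 * the example; the handler and names are hypothetical.
 */
#if 0	/* illustrative only */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... inspect skb->data ... */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_packet_type = {
	.type	= __constant_htons(0x88b5),
	.func	= example_rcv,
};

/* module init:  dev_add_pack(&example_packet_type);
 * module exit:  dev_remove_pack(&example_packet_type);
 */
#endif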
 566
 567#include <linux/interrupt.h>
 568#include <linux/notifier.h>
 569
 570extern struct net_device                loopback_dev;           /* The loopback */
 571extern struct net_device                *dev_base;              /* All devices */
 572extern rwlock_t                         dev_base_lock;          /* Device list lock */
 573
 574extern int                      netdev_boot_setup_check(struct net_device *dev);
 575extern unsigned long            netdev_boot_base(const char *prefix, int unit);
 576extern struct net_device    *dev_getbyhwaddr(unsigned short type, char *hwaddr);
 577extern struct net_device *dev_getfirstbyhwtype(unsigned short type);
 578extern void             dev_add_pack(struct packet_type *pt);
 579extern void             dev_remove_pack(struct packet_type *pt);
 580extern void             __dev_remove_pack(struct packet_type *pt);
 581
 582extern struct net_device        *dev_get_by_flags(unsigned short flags,
 583                                                  unsigned short mask);
 584extern struct net_device        *dev_get_by_name(const char *name);
 585extern struct net_device        *__dev_get_by_name(const char *name);
 586extern int              dev_alloc_name(struct net_device *dev, const char *name);
 587extern int              dev_open(struct net_device *dev);
 588extern int              dev_close(struct net_device *dev);
 589extern int              dev_queue_xmit(struct sk_buff *skb);
 590extern int              register_netdevice(struct net_device *dev);
 591extern int              unregister_netdevice(struct net_device *dev);
 592extern void             free_netdev(struct net_device *dev);
 593extern void             synchronize_net(void);
 594extern int              register_netdevice_notifier(struct notifier_block *nb);
 595extern int              unregister_netdevice_notifier(struct notifier_block *nb);
 596extern int              call_netdevice_notifiers(unsigned long val, void *v);
 597extern struct net_device        *dev_get_by_index(int ifindex);
 598extern struct net_device        *__dev_get_by_index(int ifindex);
 599extern int              dev_restart(struct net_device *dev);
 600#ifdef CONFIG_NETPOLL_TRAP
 601extern int              netpoll_trap(void);
 602#endif
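
/*
 * Illustrative sketch (not part of the original header): a subsystem that
 * needs to track interfaces coming and going registers a netdevice
 * notifier.  The NETDEV_* event codes come from <linux/notifier.h>; the
 * example_* names are hypothetical.
 */
#if 0	/* illustrative only */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		/* start using dev */
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		/* stop using dev */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier); */
#endif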
 603
 604typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
 605extern int              register_gifconf(unsigned int family, gifconf_func_t * gifconf);
 606static inline int unregister_gifconf(unsigned int family)
 607{
 608        return register_gifconf(family, NULL);
 609}
 610
 611/*
 612 * Incoming packets are placed on per-cpu queues so that
 613 * no locking is needed.
 614 */
 615
 616struct softnet_data
 617{
 618        struct net_device       *output_queue;
 619        struct sk_buff_head     input_pkt_queue;
 620        struct list_head        poll_list;
 621        struct sk_buff          *completion_queue;
 622
 623        struct net_device       backlog_dev;    /* Sorry. 8) */
 624#ifdef CONFIG_NET_DMA
 625        struct dma_chan         *net_dma;
 626#endif
 627};
 628
 629DECLARE_PER_CPU(struct softnet_data,softnet_data);
 630
 631#define HAVE_NETIF_QUEUE
 632
 633extern void __netif_schedule(struct net_device *dev);
 634
 635static inline void netif_schedule(struct net_device *dev)
 636{
 637        if (!test_bit(__LINK_STATE_XOFF, &dev->state))
 638                __netif_schedule(dev);
 639}
 640
 641static inline void netif_start_queue(struct net_device *dev)
 642{
 643        clear_bit(__LINK_STATE_XOFF, &dev->state);
 644}
 645
 646static inline void netif_wake_queue(struct net_device *dev)
 647{
 648#ifdef CONFIG_NETPOLL_TRAP
 649        if (netpoll_trap())
 650                return;
 651#endif
 652        if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
 653                __netif_schedule(dev);
 654}
 655
 656static inline void netif_stop_queue(struct net_device *dev)
 657{
 658#ifdef CONFIG_NETPOLL_TRAP
 659        if (netpoll_trap())
 660                return;
 661#endif
 662        set_bit(__LINK_STATE_XOFF, &dev->state);
 663}
 664
 665static inline int netif_queue_stopped(const struct net_device *dev)
 666{
 667        return test_bit(__LINK_STATE_XOFF, &dev->state);
 668}
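
/*
 * Illustrative sketch (not part of the original header): the usual flow
 * control pattern - hard_start_xmit() stops the queue when the TX ring
 * fills, and the TX-completion interrupt frees the sent skbs and wakes the
 * queue once there is room again.  mydrv_* helpers are hypothetical.
 */
#if 0	/* illustrative only */
static void mydrv_tx_complete(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;

	while ((skb = mydrv_reclaim_done_descriptor(priv)) != NULL) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);		/* we run in IRQ context */
	}

	if (netif_queue_stopped(dev) && mydrv_tx_ring_has_room(priv))
		netif_wake_queue(dev);		/* let the qdisc submit again */
}
#endif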
 669
 670static inline int netif_running(const struct net_device *dev)
 671{
 672        return test_bit(__LINK_STATE_START, &dev->state);
 673}
 674
 675
 676/* Use this variant when it is known for sure that the caller
 677 * is executing from interrupt context.
 678 */
 679static inline void dev_kfree_skb_irq(struct sk_buff *skb)
 680{
 681        if (atomic_dec_and_test(&skb->users)) {
 682                struct softnet_data *sd;
 683                unsigned long flags;
 684
 685                local_irq_save(flags);
 686                sd = &__get_cpu_var(softnet_data);
 687                skb->next = sd->completion_queue;
 688                sd->completion_queue = skb;
 689                raise_softirq_irqoff(NET_TX_SOFTIRQ);
 690                local_irq_restore(flags);
 691        }
 692}
 693
 694/* Use this variant in places where it could be invoked
 695 * either from interrupt or non-interrupt context.
 696 */
 697extern void dev_kfree_skb_any(struct sk_buff *skb);
 698
 699#define HAVE_NETIF_RX 1
 700extern int              netif_rx(struct sk_buff *skb);
 701extern int              netif_rx_ni(struct sk_buff *skb);
 702#define HAVE_NETIF_RECEIVE_SKB 1
 703extern int              netif_receive_skb(struct sk_buff *skb);
 704extern int              dev_valid_name(const char *name);
 705extern int              dev_ioctl(unsigned int cmd, void __user *);
 706extern int              dev_ethtool(struct ifreq *);
 707extern unsigned         dev_get_flags(const struct net_device *);
 708extern int              dev_change_flags(struct net_device *, unsigned);
 709extern int              dev_change_name(struct net_device *, char *);
 710extern int              dev_set_mtu(struct net_device *, int);
 711extern int              dev_set_mac_address(struct net_device *,
 712                                            struct sockaddr *);
 713extern int              dev_hard_start_xmit(struct sk_buff *skb,
 714                                            struct net_device *dev);
 715
 716extern void             dev_init(void);
 717
 718extern int              netdev_budget;
 719
 720/* Called by rtnetlink.c:rtnl_unlock() */
 721extern void netdev_run_todo(void);
 722
 723static inline void dev_put(struct net_device *dev)
 724{
 725        atomic_dec(&dev->refcnt);
 726}
 727
 728static inline void dev_hold(struct net_device *dev)
 729{
 730        atomic_inc(&dev->refcnt);
 731}
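
/*
 * Illustrative sketch (not part of the original header): dev_get_by_name()
 * returns the device with its refcount already raised, so every successful
 * lookup must be paired with dev_put().  example_get_ifindex() is a
 * hypothetical name.
 */
#if 0	/* illustrative only */
static int example_get_ifindex(const char *name)
{
	struct net_device *dev;
	int ifindex;

	dev = dev_get_by_name(name);	/* takes a reference on success */
	if (dev == NULL)
		return -ENODEV;
	ifindex = dev->ifindex;
	dev_put(dev);			/* drop the reference taken above */
	return ifindex;
}
#endif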
 732
 733/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 734 * and _off may be called from IRQ context, but it is the caller
 735 * who is responsible for serialization of these calls.
 736 *
 737 * The name carrier is inappropriate; these functions should really be
 738 * called netif_lowerlayer_*() because they represent the state of any
 739 * kind of lower layer not just hardware media.
 740 */
 741
 742extern void linkwatch_fire_event(struct net_device *dev);
 743
 744static inline int netif_carrier_ok(const struct net_device *dev)
 745{
 746        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 747}
 748
 749extern void __netdev_watchdog_up(struct net_device *dev);
 750
 751extern void netif_carrier_on(struct net_device *dev);
 752
 753extern void netif_carrier_off(struct net_device *dev);
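
/*
 * Illustrative sketch (not part of the original header): a driver typically
 * calls netif_carrier_off() while the link is down (often right after
 * registration) and toggles the state from its link-change handler.
 * mydrv_* names are hypothetical.
 */
#if 0	/* illustrative only */
static void mydrv_link_change(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (mydrv_phy_link_up(priv)) {
		netif_carrier_on(dev);	/* queueing layer may transmit again */
		netif_wake_queue(dev);
	} else {
		netif_carrier_off(dev);	/* qdisc/watchdog stop using the device */
	}
}
#endif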
 754
 755static inline void netif_dormant_on(struct net_device *dev)
 756{
 757        if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
 758                linkwatch_fire_event(dev);
 759}
 760
 761static inline void netif_dormant_off(struct net_device *dev)
 762{
 763        if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
 764                linkwatch_fire_event(dev);
 765}
 766
 767static inline int netif_dormant(const struct net_device *dev)
 768{
 769        return test_bit(__LINK_STATE_DORMANT, &dev->state);
 770}
 771
 772
 773static inline int netif_oper_up(const struct net_device *dev) {
 774        return (dev->operstate == IF_OPER_UP ||
 775                dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
 776}
 777
 778/* Hot-plugging. */
 779static inline int netif_device_present(struct net_device *dev)
 780{
 781        return test_bit(__LINK_STATE_PRESENT, &dev->state);
 782}
 783
 784extern void netif_device_detach(struct net_device *dev);
 785
 786extern void netif_device_attach(struct net_device *dev);
 787
 788/*
 789 * Network interface message level settings
 790 */
 791#define HAVE_NETIF_MSG 1
 792
 793enum {
 794        NETIF_MSG_DRV           = 0x0001,
 795        NETIF_MSG_PROBE         = 0x0002,
 796        NETIF_MSG_LINK          = 0x0004,
 797        NETIF_MSG_TIMER         = 0x0008,
 798        NETIF_MSG_IFDOWN        = 0x0010,
 799        NETIF_MSG_IFUP          = 0x0020,
 800        NETIF_MSG_RX_ERR        = 0x0040,
 801        NETIF_MSG_TX_ERR        = 0x0080,
 802        NETIF_MSG_TX_QUEUED     = 0x0100,
 803        NETIF_MSG_INTR          = 0x0200,
 804        NETIF_MSG_TX_DONE       = 0x0400,
 805        NETIF_MSG_RX_STATUS     = 0x0800,
 806        NETIF_MSG_PKTDATA       = 0x1000,
 807        NETIF_MSG_HW            = 0x2000,
 808        NETIF_MSG_WOL           = 0x4000,
 809};
 810
 811#define netif_msg_drv(p)        ((p)->msg_enable & NETIF_MSG_DRV)
 812#define netif_msg_probe(p)      ((p)->msg_enable & NETIF_MSG_PROBE)
 813#define netif_msg_link(p)       ((p)->msg_enable & NETIF_MSG_LINK)
 814#define netif_msg_timer(p)      ((p)->msg_enable & NETIF_MSG_TIMER)
 815#define netif_msg_ifdown(p)     ((p)->msg_enable & NETIF_MSG_IFDOWN)
 816#define netif_msg_ifup(p)       ((p)->msg_enable & NETIF_MSG_IFUP)
 817#define netif_msg_rx_err(p)     ((p)->msg_enable & NETIF_MSG_RX_ERR)
 818#define netif_msg_tx_err(p)     ((p)->msg_enable & NETIF_MSG_TX_ERR)
 819#define netif_msg_tx_queued(p)  ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
 820#define netif_msg_intr(p)       ((p)->msg_enable & NETIF_MSG_INTR)
 821#define netif_msg_tx_done(p)    ((p)->msg_enable & NETIF_MSG_TX_DONE)
 822#define netif_msg_rx_status(p)  ((p)->msg_enable & NETIF_MSG_RX_STATUS)
 823#define netif_msg_pktdata(p)    ((p)->msg_enable & NETIF_MSG_PKTDATA)
 824#define netif_msg_hw(p)         ((p)->msg_enable & NETIF_MSG_HW)
 825#define netif_msg_wol(p)        ((p)->msg_enable & NETIF_MSG_WOL)
 826
 827static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 828{
 829        /* use default */
 830        if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
 831                return default_msg_enable_bits;
 832        if (debug_value == 0)   /* no output */
 833                return 0;
 834        /* set low N bits */
 835        return (1 << debug_value) - 1;
 836}
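
/*
 * Illustrative sketch (not part of the original header): the conventional
 * use of a "debug" module parameter with netif_msg_init(); the
 * netif_msg_*() tests then gate the driver's printks.  mydrv_* and the
 * msg_enable field in the private struct are hypothetical.
 */
#if 0	/* illustrative only */
static int debug = -1;			/* -1 selects the driver's defaults */

static void mydrv_init_msg(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	priv->msg_enable = netif_msg_init(debug,
			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	if (netif_msg_drv(priv))
		printk(KERN_INFO "%s: message level 0x%x\n",
		       dev->name, priv->msg_enable);
}
#endif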
 837
 838/* Test if receive needs to be scheduled */
 839static inline int __netif_rx_schedule_prep(struct net_device *dev)
 840{
 841        return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
 842}
 843
 844/* Test if receive needs to be scheduled but only if up */
 845static inline int netif_rx_schedule_prep(struct net_device *dev)
 846{
 847        return netif_running(dev) && __netif_rx_schedule_prep(dev);
 848}
 849
 850/* Add interface to tail of rx poll list. This assumes that _prep has
 851 * already been called and returned 1.
 852 */
 853
 854extern void __netif_rx_schedule(struct net_device *dev);
 855
 856/* Try to reschedule poll. Called by irq handler. */
 857
 858static inline void netif_rx_schedule(struct net_device *dev)
 859{
 860        if (netif_rx_schedule_prep(dev))
 861                __netif_rx_schedule(dev);
 862}
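
/*
 * Illustrative sketch (not part of the original header): the RX half of a
 * NAPI driver's interrupt handler - disable the NIC's RX interrupt and hand
 * the work to dev->poll() via the softirq.  mydrv_* names are hypothetical.
 */
#if 0	/* illustrative only */
static void mydrv_rx_interrupt(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (netif_rx_schedule_prep(dev)) {
		mydrv_disable_rx_irq(priv);	/* poll() re-enables it */
		__netif_rx_schedule(dev);
	}
	/* else: a poll is already scheduled, nothing to do */
}
#endif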
 863
 864/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 865 * Do not inline this?
 866 */
 867static inline int netif_rx_reschedule(struct net_device *dev, int undo)
 868{
 869        if (netif_rx_schedule_prep(dev)) {
 870                unsigned long flags;
 871
 872                dev->quota += undo;
 873
 874                local_irq_save(flags);
 875                list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
 876                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 877                local_irq_restore(flags);
 878                return 1;
 879        }
 880        return 0;
 881}
 882
 883/* Remove interface from poll list: it must be in the poll list
 884 * on current cpu. This primitive is called by dev->poll(), when
 885 * it completes the work. The device cannot be out of poll list at this
 886 * moment, it is BUG().
 887 */
 888static inline void netif_rx_complete(struct net_device *dev)
 889{
 890        unsigned long flags;
 891
 892        local_irq_save(flags);
 893        BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
 894        list_del(&dev->poll_list);
 895        smp_mb__before_clear_bit();
 896        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 897        local_irq_restore(flags);
 898}
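
/*
 * Illustrative sketch (not part of the original header): the usual shape of
 * a dev->poll() callback with this API - consume up to the allowed quota,
 * then either report remaining work (return 1) or call netif_rx_complete()
 * and re-enable the RX interrupt (return 0).  The mydrv_* helpers are
 * hypothetical; eth_type_trans() comes from <linux/etherdevice.h>.
 */
#if 0	/* illustrative only */
static int mydrv_poll(struct net_device *dev, int *budget)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	int to_do = min(*budget, dev->quota);
	int done = 0;

	while (done < to_do && mydrv_rx_ring_has_packet(priv)) {
		struct sk_buff *skb = mydrv_build_rx_skb(priv, dev);

		skb->protocol = eth_type_trans(skb, dev);
		netif_receive_skb(skb);
		done++;
	}

	*budget -= done;
	dev->quota -= done;

	if (done < to_do) {			/* ring drained */
		netif_rx_complete(dev);
		mydrv_enable_rx_irq(priv);
		return 0;
	}
	return 1;				/* please poll again */
}
#endif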
 899
 900static inline void netif_poll_disable(struct net_device *dev)
 901{
 902        while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
 903                /* No hurry. */
 904                schedule_timeout_interruptible(1);
 905}
 906
 907static inline void netif_poll_enable(struct net_device *dev)
 908{
 909        smp_mb__before_clear_bit();
 910        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 911}
 912
 913/* same as netif_rx_complete, except that local_irq_save(flags)
 914 * has already been issued
 915 */
 916static inline void __netif_rx_complete(struct net_device *dev)
 917{
 918        BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
 919        list_del(&dev->poll_list);
 920        smp_mb__before_clear_bit();
 921        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 922}
 923
 924static inline void netif_tx_lock(struct net_device *dev)
 925{
 926        spin_lock(&dev->_xmit_lock);
 927        dev->xmit_lock_owner = smp_processor_id();
 928}
 929
 930static inline void netif_tx_lock_bh(struct net_device *dev)
 931{
 932        spin_lock_bh(&dev->_xmit_lock);
 933        dev->xmit_lock_owner = smp_processor_id();
 934}
 935
 936static inline int netif_tx_trylock(struct net_device *dev)
 937{
 938        int ok = spin_trylock(&dev->_xmit_lock);
 939        if (likely(ok))
 940                dev->xmit_lock_owner = smp_processor_id();
 941        return ok;
 942}
 943
 944static inline void netif_tx_unlock(struct net_device *dev)
 945{
 946        dev->xmit_lock_owner = -1;
 947        spin_unlock(&dev->_xmit_lock);
 948}
 949
 950static inline void netif_tx_unlock_bh(struct net_device *dev)
 951{
 952        dev->xmit_lock_owner = -1;
 953        spin_unlock_bh(&dev->_xmit_lock);
 954}
 955
 956static inline void netif_tx_disable(struct net_device *dev)
 957{
 958        netif_tx_lock_bh(dev);
 959        netif_stop_queue(dev);
 960        netif_tx_unlock_bh(dev);
 961}
 962
 963/* These functions live elsewhere (drivers/net/net_init.c), but are related */
 964
 965extern void             ether_setup(struct net_device *dev);
 966
 967/* Support for loadable net-drivers */
 968extern struct net_device *alloc_netdev(int sizeof_priv, const char *name,
 969                                       void (*setup)(struct net_device *));
 970extern int              register_netdev(struct net_device *dev);
 971extern void             unregister_netdev(struct net_device *dev);
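
/*
 * Illustrative sketch (not part of the original header): the usual lifecycle
 * of a loadable driver built on these helpers.  The mydrv_* methods, the
 * private structure and the static device pointer are hypothetical.
 */
#if 0	/* illustrative only */
static struct net_device *mydrv_dev;

static void mydrv_setup(struct net_device *dev)
{
	ether_setup(dev);			/* Ethernet defaults */
	dev->open		= mydrv_open;
	dev->stop		= mydrv_stop;
	dev->hard_start_xmit	= mydrv_hard_start_xmit;
	dev->get_stats		= mydrv_get_stats;
}

static int __init mydrv_init(void)
{
	int err;

	mydrv_dev = alloc_netdev(sizeof(struct mydrv_priv), "myeth%d",
				 mydrv_setup);
	if (mydrv_dev == NULL)
		return -ENOMEM;
	SET_MODULE_OWNER(mydrv_dev);
	/* SET_NETDEV_DEV(mydrv_dev, parent) when backed by a real bus device */

	err = register_netdev(mydrv_dev);	/* takes the rtnl lock itself */
	if (err)
		free_netdev(mydrv_dev);
	return err;
}

static void __exit mydrv_exit(void)
{
	unregister_netdev(mydrv_dev);
	free_netdev(mydrv_dev);
}
#endif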
 972/* Functions used for multicast support */
 973extern void             dev_mc_upload(struct net_device *dev);
 974extern int              dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
 975extern int              dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 976extern void             dev_mc_discard(struct net_device *dev);
 977extern void             dev_set_promiscuity(struct net_device *dev, int inc);
 978extern void             dev_set_allmulti(struct net_device *dev, int inc);
 979extern void             netdev_state_change(struct net_device *dev);
 980extern void             netdev_features_change(struct net_device *dev);
 981/* Load a device via the kmod */
 982extern void             dev_load(const char *name);
 983extern void             dev_mcast_init(void);
 984extern int              netdev_max_backlog;
 985extern int              weight_p;
 986extern int              netdev_set_master(struct net_device *dev, struct net_device *master);
 987extern int skb_checksum_help(struct sk_buff *skb);
 988extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
 989#ifdef CONFIG_BUG
 990extern void netdev_rx_csum_fault(struct net_device *dev);
 991#else
 992static inline void netdev_rx_csum_fault(struct net_device *dev)
 993{
 994}
 995#endif
 996/* rx skb timestamps */
 997extern void             net_enable_timestamp(void);
 998extern void             net_disable_timestamp(void);
 999
1000#ifdef CONFIG_PROC_FS
1001extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
1002extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1003extern void dev_seq_stop(struct seq_file *seq, void *v);
1004#endif
1005
1006extern void linkwatch_run_queue(void);
1007
1008static inline int net_gso_ok(int features, int gso_type)
1009{
1010        int feature = gso_type << NETIF_F_GSO_SHIFT;
1011        return (features & feature) == feature;
1012}
1013
1014static inline int skb_gso_ok(struct sk_buff *skb, int features)
1015{
1016        return net_gso_ok(features, skb_shinfo(skb)->gso_type);
1017}
1018
1019static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
1020{
1021        return skb_is_gso(skb) &&
1022               (!skb_gso_ok(skb, dev->features) ||
1023                unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
1024}
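
/*
 * Illustrative sketch (not part of the original header): roughly what the
 * core transmit path does with these tests (simplified from net/core/dev.c).
 * If the device cannot handle a GSO skb directly, it is segmented in
 * software and the segments are transmitted one by one.
 * example_xmit_segments() is a hypothetical helper.
 */
#if 0	/* illustrative only */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs = skb_gso_segment(skb, dev->features);

		if (unlikely(IS_ERR(segs)))
			return PTR_ERR(segs);
		/* hand the segs->next chain to the device one segment
		 * at a time instead of the oversized original */
		return example_xmit_segments(skb, segs, dev);
	}
	return dev->hard_start_xmit(skb, dev);
}
#endif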
1025
1026/* On bonding slaves other than the currently active slave, suppress
1027 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
1028 * ARP on active-backup slaves with arp_validate enabled.
1029 */
1030static inline int skb_bond_should_drop(struct sk_buff *skb)
1031{
1032        struct net_device *dev = skb->dev;
1033        struct net_device *master = dev->master;
1034
1035        if (master &&
1036            (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
1037                if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
1038                    skb->protocol == __constant_htons(ETH_P_ARP))
1039                        return 0;
1040
1041                if (master->priv_flags & IFF_MASTER_ALB) {
1042                        if (skb->pkt_type != PACKET_BROADCAST &&
1043                            skb->pkt_type != PACKET_MULTICAST)
1044                                return 0;
1045                }
1046                if (master->priv_flags & IFF_MASTER_8023AD &&
1047                    skb->protocol == __constant_htons(ETH_P_SLOW))
1048                        return 0;
1049
1050                return 1;
1051        }
1052        return 0;
1053}
1054
1055#endif /* __KERNEL__ */
1056
1057#endif  /* _LINUX_NETDEVICE_H */
1058