linux/include/linux/skbuff.h
/*
 *      Definitions for the 'struct sk_buff' memory handlers.
 *
 *      Authors:
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
                                 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)    \
        ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
        SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +                                          \
                         SKB_DATA_ALIGN(sizeof(struct sk_buff)) +       \
                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 *      NONE: device failed to checksum this packet.
 *              skb->csum is undefined.
 *
 *      UNNECESSARY: device parsed the packet and verified the checksum.
 *              skb->csum is undefined.
 *            It is a bad option, but, unfortunately, many vendors do this.
 *            Apparently with the secret goal of selling you a new device
 *            when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *      COMPLETE: the most generic way. Device supplied the checksum of _all_
 *          the packet as seen by netif_rx in skb->csum.
 *          NOTE: Even if a device supports only some protocols but
 *          is able to produce some skb->csum, it MUST use COMPLETE,
 *          not UNNECESSARY.
 *
 *      PARTIAL: identical to the case for output below.  This may occur
 *          on a packet received directly from another Linux OS, e.g.,
 *          a virtualised Linux kernel on the same host.  The packet can
 *          be treated in the same way as UNNECESSARY, except that on
 *          output (i.e., forwarding) the checksum must be filled in
 *          by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *      NONE: skb is checksummed by protocol or csum is not required.
 *
 *      PARTIAL: device is required to csum the packet as seen by
 *      hard_start_xmit from skb->csum_start to the end and to record
 *      the checksum at skb->csum_start + skb->csum_offset.
 *
 *      The device must show its capabilities in dev->features, set
 *      at device setup time, e.g.:
 *      NETIF_F_HW_CSUM - the device is clever and able to checksum
 *                        everything.
 *      NETIF_F_IP_CSUM - the device is dumb and able to csum only
 *                        TCP/UDP over IPv4. Sigh. Vendors like it this
 *                        way for some unknown reason. Though, see the
 *                        comment above about CHECKSUM_UNNECESSARY. 8)
 *      NETIF_F_IPV6_CSUM - about as dumb as the last one, but does
 *                        IPv6 instead.
 *
 *      UNNECESSARY: device will do a protocol-specific csum. Protocol drivers
 *      that do not want the stack to perform the checksum calculation should
 *      use this flag in their outgoing skbs.
 *      NETIF_F_FCOE_CRC - the device can do the FCoE FC CRC
 *                        offload. Correspondingly, the FCoE protocol driver
 *                        stack should use CHECKSUM_UNNECESSARY.
 *
 *      Any questions? No questions, good.              --ANK
 */
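
/*
 * Example: a minimal receive-path sketch of labelling a packet per the
 * rules above. This is hypothetical driver code, not part of this header;
 * the descriptor layout "my_desc" and its fields are invented for
 * illustration.
 *
 *      static void my_rx_csum(struct my_desc *desc, struct sk_buff *skb)
 *      {
 *              if (desc->csum_done) {
 *                      skb->csum = desc->csum;         // sum over the whole packet
 *                      skb->ip_summed = CHECKSUM_COMPLETE;
 *              } else {
 *                      skb->ip_summed = CHECKSUM_NONE; // stack must verify
 *              }
 *      }
 */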

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
        atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
        atomic_t                use;
        unsigned int            mask;
        struct net_device       *physindev;
        struct net_device       *physoutdev;
        unsigned long           data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
        /* These two members must be first. */
        struct sk_buff  *next;
        struct sk_buff  *prev;

        __u32           qlen;
        spinlock_t      lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
        struct {
                struct page *p;
        } page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 page_offset;
        __u32 size;
#else
        __u16 page_offset;
        __u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
        return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
        frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
        frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
        frag->size -= delta;
}
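
/*
 * Example: the accessors above are the supported way to touch frag sizes.
 * A minimal sketch (hypothetical; "copy" is the number of bytes just
 * written into the last fragment's page) that keeps the skb length
 * accounting consistent:
 *
 *      skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
 *
 *      skb_frag_size_add(frag, copy);
 *      skb->len      += copy;
 *      skb->data_len += copy;
 */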

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:   hardware time stamp transformed into duration
 *              since arbitrary point in time
 * @syststamp:  hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
        ktime_t hwtstamp;
        ktime_t syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
        /* generate hardware time stamp */
        SKBTX_HW_TSTAMP = 1 << 0,

        /* generate software time stamp */
        SKBTX_SW_TSTAMP = 1 << 1,

        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,

        /* device driver supports TX zero-copy buffers */
        SKBTX_DEV_ZEROCOPY = 1 << 3,

        /* generate wifi status information (where possible) */
        SKBTX_WIFI_STATUS = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be zero when this
 * is called.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
        void (*callback)(struct ubuf_info *);
        void *ctx;
        unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
        unsigned char   nr_frags;
        __u8            tx_flags;
        unsigned short  gso_size;
        /* Warning: this field is not always filled in (UFO)! */
        unsigned short  gso_segs;
        unsigned short  gso_type;
        struct sk_buff  *frag_list;
        struct skb_shared_hwtstamps hwtstamps;
        __be32          ip6_frag_id;

        /*
         * Warning : all fields before dataref are cleared in __alloc_skb()
         */
        atomic_t        dataref;

        /* Intermediate layers must ensure that destructor_arg
         * remains valid until skb destructor */
        void *          destructor_arg;

        /* must be last field, see pskb_expand_head() */
        skb_frag_t      frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
        SKB_FCLONE_UNAVAILABLE,
        SKB_FCLONE_ORIG,
        SKB_FCLONE_CLONE,
};

enum {
        SKB_GSO_TCPV4 = 1 << 0,
        SKB_GSO_UDP = 1 << 1,

        /* This indicates the skb is from an untrusted source. */
        SKB_GSO_DODGY = 1 << 2,

        /* This indicates the tcp segment has CWR set. */
        SKB_GSO_TCP_ECN = 1 << 3,

        SKB_GSO_TCPV6 = 1 << 4,

        SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *      struct sk_buff - socket buffer
 *      @next: Next buffer in list
 *      @prev: Previous buffer in list
 *      @tstamp: Time we arrived
 *      @sk: Socket we are owned by
 *      @dev: Device we arrived on/are leaving by
 *      @cb: Control buffer. Free for use by every layer. Put private vars here
 *      @_skb_refdst: destination entry (with norefcount bit)
 *      @sp: the security path, used for xfrm
 *      @len: Length of actual data
 *      @data_len: Data length
 *      @mac_len: Length of link layer header
 *      @hdr_len: writable header length of cloned skb
 *      @csum: Checksum (must include start/offset pair)
 *      @csum_start: Offset from skb->head where checksumming should start
 *      @csum_offset: Offset from csum_start where checksum should be stored
 *      @priority: Packet queueing priority
 *      @local_df: allow local fragmentation
 *      @cloned: Head may be cloned (check refcnt to be sure)
 *      @ip_summed: Driver fed us an IP checksum
 *      @nohdr: Payload reference only, must not modify header
 *      @nfctinfo: Relationship of this skb to the connection
 *      @pkt_type: Packet class
 *      @fclone: skbuff clone status
 *      @ipvs_property: skbuff is owned by ipvs
 *      @peeked: this packet has been seen already, so stats have been
 *              done for it, don't do them again
 *      @nf_trace: netfilter packet trace flag
 *      @protocol: Packet protocol from driver
 *      @destructor: Destruct function
 *      @nfct: Associated connection, if any
 *      @nfct_reasm: netfilter conntrack re-assembly pointer
 *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *      @skb_iif: ifindex of device we arrived on
 *      @tc_index: Traffic control index
 *      @tc_verd: traffic control verdict
 *      @rxhash: the packet hash computed on receive
 *      @queue_mapping: Queue mapping for multiqueue devices
 *      @ndisc_nodetype: router type (from link layer)
 *      @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *      @ooo_okay: allow the mapping of a socket to a queue to be changed
 *      @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *              ports.
 *      @wifi_acked_valid: wifi_acked was set
 *      @wifi_acked: whether frame was acked on wifi or not
 *      @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 *      @head_frag: skb->head was allocated from a page fragment
 *      @dma_cookie: a cookie to one of several possible DMA operations
 *              done by skb DMA functions
 *      @secmark: security marking
 *      @mark: Generic packet mark
 *      @dropcount: total number of sk_receive_queue overflows
 *      @avail_size: available tailroom, as set by sk_stream_alloc()
 *      @vlan_tci: vlan tag control information
 *      @transport_header: Transport layer header
 *      @network_header: Network layer header
 *      @mac_header: Link layer header
 *      @tail: Tail pointer
 *      @end: End pointer
 *      @head: Head of buffer
 *      @data: Data head pointer
 *      @truesize: Buffer size
 *      @users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
        /* These two members must be first. */
        struct sk_buff          *next;
        struct sk_buff          *prev;

        ktime_t                 tstamp;

        struct sock             *sk;
        struct net_device       *dev;

        /*
         * This is the control buffer. It is free to use for every
         * layer. Please put your private variables there. If you
         * want to keep them across layers you have to do a skb_clone()
         * first. This is owned by whoever has the skb queued ATM.
         */
        char                    cb[48] __aligned(8);

        unsigned long           _skb_refdst;
#ifdef CONFIG_XFRM
        struct  sec_path        *sp;
#endif
        unsigned int            len,
                                data_len;
        __u16                   mac_len,
                                hdr_len;
        union {
                __wsum          csum;
                struct {
                        __u16   csum_start;
                        __u16   csum_offset;
                };
        };
        __u32                   priority;
        kmemcheck_bitfield_begin(flags1);
        __u8                    local_df:1,
                                cloned:1,
                                ip_summed:2,
                                nohdr:1,
                                nfctinfo:3;
        __u8                    pkt_type:3,
                                fclone:2,
                                ipvs_property:1,
                                peeked:1,
                                nf_trace:1;
        kmemcheck_bitfield_end(flags1);
        __be16                  protocol;

        void                    (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct nf_conntrack     *nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        struct sk_buff          *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        struct nf_bridge_info   *nf_bridge;
#endif

        int                     skb_iif;

        __u32                   rxhash;

        __u16                   vlan_tci;

#ifdef CONFIG_NET_SCHED
        __u16                   tc_index;       /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
        __u16                   tc_verd;        /* traffic control verdict */
#endif
#endif

        __u16                   queue_mapping;
        kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
#endif
        __u8                    pfmemalloc:1;
        __u8                    ooo_okay:1;
        __u8                    l4_rxhash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
        __u8                    no_fcs:1;
        __u8                    head_frag:1;
        /* 8/10 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
        dma_cookie_t            dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
        __u32                   secmark;
#endif
        union {
                __u32           mark;
                __u32           dropcount;
                __u32           avail_size;
        };

        sk_buff_data_t          transport_header;
        sk_buff_data_t          network_header;
        sk_buff_data_t          mac_header;
        /* These elements must be at the end, see alloc_skb() for details.  */
        sk_buff_data_t          tail;
        sk_buff_data_t          end;
        unsigned char           *head,
                                *data;
        unsigned int            truesize;
        atomic_t                users;
};

#ifdef __KERNEL__
/*
 *      Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#define SKB_ALLOC_FCLONE        0x01
#define SKB_ALLOC_RX            0x02

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
        return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF   1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of whether a reference was taken.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
        /* If refdst was not refcounted, check that we are still in an
         * rcu_read_lock section
         */
        WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
                !rcu_read_lock_held() &&
                !rcu_read_lock_bh_held());
        return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
        skb->_skb_refdst = (unsigned long)dst;
}
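
/*
 * Example: a minimal sketch of attaching a refcounted route to an skb and
 * borrowing it back later (hypothetical; assumes "rt" is a valid struct
 * rtable on which a reference has already been taken):
 *
 *      skb_dst_set(skb, &rt->dst);             // skb now owns the reference
 *      ...
 *      struct dst_entry *dst = skb_dst(skb);   // borrow; no refcount taken
 */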

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
        return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
        return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
                             bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
                                   gfp_t priority, int flags, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
{
        return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                               gfp_t priority)
{
        return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
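
/*
 * Example: typical allocation of a linear skb with headroom reserved for
 * headers (a minimal sketch; "data" and "len" are hypothetical payload
 * variables, and error handling beyond the NULL check is elided):
 *
 *      struct sk_buff *skb = alloc_skb(NET_SKB_PAD + len, GFP_ATOMIC);
 *
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, NET_SKB_PAD);          // headroom for later headers
 *      memcpy(skb_put(skb, len), data, len);   // append payload
 */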

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
                                gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
                                   int headroom, gfp_t gfp_mask);

extern int             pskb_expand_head(struct sk_buff *skb,
                                        int nhead, int ntail,
                                        gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
                                            unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                       int newheadroom, int newtailroom,
                                       gfp_t priority);
extern int             skb_to_sgvec(struct sk_buff *skb,
                                    struct scatterlist *sg, int offset,
                                    int len);
extern int             skb_cow_data(struct sk_buff *skb, int tailbits,
                                    struct sk_buff **trailer);
extern int             skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)        consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                        int getfrag(void *from, char *to, int offset,
                                    int len, int odd, struct sk_buff *skb),
                        void *from, int length);

struct skb_seq_state {
        __u32           lower_offset;
        __u32           upper_offset;
        __u32           frag_idx;
        __u32           stepped_offset;
        struct sk_buff  *root_skb;
        struct sk_buff  *cur_skb;
        __u8            *frag_data;
};

extern void           skb_prepare_seq_read(struct sk_buff *skb,
                                           unsigned int from, unsigned int to,
                                           struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
                                   struct skb_seq_state *st);
extern void           skb_abort_seq_read(struct skb_seq_state *st);
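
/*
 * Example: the sequential-read loop over a (possibly non-linear) skb using
 * the state machine above. A minimal sketch; process() is a hypothetical
 * consumer, and skb_abort_seq_read() is needed when the loop exits early:
 *
 *      struct skb_seq_state st;
 *      const u8 *data;
 *      unsigned int len, consumed = 0;
 *
 *      skb_prepare_seq_read(skb, 0, skb->len, &st);
 *      while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *              if (process(data, len) < 0) {
 *                      skb_abort_seq_read(&st);        // stop early
 *                      break;
 *              }
 *              consumed += len;
 *      }
 */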

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
                                    unsigned int to, struct ts_config *config,
                                    struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
        if (!skb->rxhash)
                __skb_get_rxhash(skb);

        return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
        return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
        return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
        return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
        return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
        return &skb_shinfo(skb)->hwtstamps;
}

/**
 *      skb_queue_empty - check if a queue is empty
 *      @list: queue head
 *
 *      Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
        return list->next == (struct sk_buff *)list;
}

/**
 *      skb_queue_is_last - check if skb is the last entry in the queue
 *      @list: queue head
 *      @skb: buffer
 *
 *      Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
                                     const struct sk_buff *skb)
{
        return skb->next == (struct sk_buff *)list;
}

/**
 *      skb_queue_is_first - check if skb is the first entry in the queue
 *      @list: queue head
 *      @skb: buffer
 *
 *      Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
                                      const struct sk_buff *skb)
{
        return skb->prev == (struct sk_buff *)list;
}

/**
 *      skb_queue_next - return the next packet in the queue
 *      @list: queue head
 *      @skb: current buffer
 *
 *      Return the next packet in @list after @skb.  It is only valid to
 *      call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
                                             const struct sk_buff *skb)
{
        /* This BUG_ON may seem severe, but if we just return then we
         * are going to dereference garbage.
         */
        BUG_ON(skb_queue_is_last(list, skb));
        return skb->next;
}

/**
 *      skb_queue_prev - return the prev packet in the queue
 *      @list: queue head
 *      @skb: current buffer
 *
 *      Return the prev packet in @list before @skb.  It is only valid to
 *      call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
                                             const struct sk_buff *skb)
{
        /* This BUG_ON may seem severe, but if we just return then we
         * are going to dereference garbage.
         */
        BUG_ON(skb_queue_is_first(list, skb));
        return skb->prev;
}

/**
 *      skb_get - reference buffer
 *      @skb: buffer to reference
 *
 *      Makes another reference to a socket buffer and returns a pointer
 *      to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
        atomic_inc(&skb->users);
        return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *      skb_cloned - is the buffer a clone
 *      @skb: buffer to check
 *
 *      Returns true if the buffer was generated with skb_clone() and is
 *      one of multiple shared copies of the buffer. Cloned buffers are
 *      shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
        return skb->cloned &&
               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *      skb_header_cloned - is the header a clone
 *      @skb: buffer to check
 *
 *      Returns true if modifying the header part of the buffer requires
 *      the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
        int dataref;

        if (!skb->cloned)
                return 0;

        dataref = atomic_read(&skb_shinfo(skb)->dataref);
        dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
        return dataref != 1;
}

/**
 *      skb_header_release - release reference to header
 *      @skb: buffer to operate on
 *
 *      Drop a reference to the header part of the buffer.  This is done
 *      by acquiring a payload reference.  You must not read from the header
 *      part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
        BUG_ON(skb->nohdr);
        skb->nohdr = 1;
        atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *      skb_shared - is the buffer shared
 *      @skb: buffer to check
 *
 *      Returns true if more than one person has a reference to this
 *      buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
        return atomic_read(&skb->users) != 1;
}

/**
 *      skb_share_check - check if buffer is shared and if so clone it
 *      @skb: buffer to check
 *      @pri: priority for memory allocation
 *
 *      If the buffer is shared the buffer is cloned and the old copy
 *      drops a reference. A new clone with a single reference is returned.
 *      If the buffer is not shared the original buffer is returned. When
 *      being called from interrupt context or with spinlocks held, @pri
 *      must be %GFP_ATOMIC.
 *
 *      %NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
                                              gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);
                kfree_skb(skb);
                skb = nskb;
        }
        return skb;
}
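
/*
 * Example: the usual receive-handler idiom (a minimal sketch): make sure
 * we own the buffer before modifying it. On clone failure the original
 * has already been freed, so the caller just drops out:
 *
 *      skb = skb_share_check(skb, GFP_ATOMIC);
 *      if (!skb)
 *              return NET_RX_DROP;     // clone failed; original was freed
 */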

/*
 *      Copy shared buffers into a new sk_buff. We effectively do COW on
 *      packets to handle cases where we have a local reader and forward,
 *      and a couple of other messy ones. The normal one is tcpdumping
 *      a packet that's being forwarded.
 */

/**
 *      skb_unshare - make a copy of a shared buffer
 *      @skb: buffer to check
 *      @pri: priority for memory allocation
 *
 *      If the socket buffer is a clone then this function creates a new
 *      copy of the data, drops a reference count on the old copy and returns
 *      the new copy with the reference count at 1. If the buffer is not a clone
 *      the original buffer is returned. When called with a spinlock held or
 *      from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *      %NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
                                          gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_cloned(skb)) {
                struct sk_buff *nskb = skb_copy(skb, pri);
                kfree_skb(skb); /* Free our shared copy */
                skb = nskb;
        }
        return skb;
}

/**
 *      skb_peek - peek at the head of an &sk_buff_head
 *      @list_: list to peek at
 *
 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 *      be careful with this one. A peek leaves the buffer on the
 *      list and someone else may run off with it. You must hold
 *      the appropriate locks or have a private queue to do this.
 *
 *      Returns %NULL for an empty list or a pointer to the head element.
 *      The reference count is not incremented and the reference is therefore
 *      volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
        struct sk_buff *skb = list_->next;

        if (skb == (struct sk_buff *)list_)
                skb = NULL;
        return skb;
}

/**
 *      skb_peek_next - peek skb following the given one from a queue
 *      @skb: skb to start from
 *      @list_: list to peek at
 *
 *      Returns %NULL when the end of the list is met or a pointer to the
 *      next element. The reference count is not incremented and the
 *      reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
                const struct sk_buff_head *list_)
{
        struct sk_buff *next = skb->next;

        if (next == (struct sk_buff *)list_)
                next = NULL;
        return next;
}

/**
 *      skb_peek_tail - peek at the tail of an &sk_buff_head
 *      @list_: list to peek at
 *
 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 *      be careful with this one. A peek leaves the buffer on the
 *      list and someone else may run off with it. You must hold
 *      the appropriate locks or have a private queue to do this.
 *
 *      Returns %NULL for an empty list or a pointer to the tail element.
 *      The reference count is not incremented and the reference is therefore
 *      volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
        struct sk_buff *skb = list_->prev;

        if (skb == (struct sk_buff *)list_)
                skb = NULL;
        return skb;
}

/**
 *      skb_queue_len   - get queue length
 *      @list_: list to measure
 *
 *      Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
        return list_->qlen;
}

/**
 *      __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *      @list: queue to initialize
 *
 *      This initializes only the list and queue length aspects of
 *      an sk_buff_head object.  This allows initializing the list
 *      aspects of an sk_buff_head without reinitializing things like
 *      the spinlock.  It can also be used for on-stack sk_buff_head
 *      objects where the spinlock is known not to be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
        list->prev = list->next = (struct sk_buff *)list;
        list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
        spin_lock_init(&list->lock);
        __skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
                struct lock_class_key *class)
{
        skb_queue_head_init(list);
        lockdep_set_class(&list->lock, class);
}
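
/*
 * Example: an on-stack queue whose spinlock is never taken, so only the
 * list portion needs initialising (a minimal sketch):
 *
 *      struct sk_buff_head tmp;
 *
 *      __skb_queue_head_init(&tmp);    // no spin_lock_init() needed
 */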

/*
 *      Insert an sk_buff on a list.
 *
 *      The "__skb_xxxx()" functions are the non-atomic ones that
 *      can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
{
        newsk->next = next;
        newsk->prev = prev;
        next->prev  = prev->next = newsk;
        list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
                                      struct sk_buff *prev,
                                      struct sk_buff *next)
{
        struct sk_buff *first = list->next;
        struct sk_buff *last = list->prev;

        first->prev = prev;
        prev->next = first;

        last->next = next;
        next->prev = last;
}

/**
 *      skb_queue_splice - join two skb lists, this is designed for stacks
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
                                    struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
                head->qlen += list->qlen;
        }
}

/**
 *      skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 *
 *      The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
                                         struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
                head->qlen += list->qlen;
                __skb_queue_head_init(list);
        }
}

/**
 *      skb_queue_splice_tail - join two skb lists, each list being a queue
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
                                         struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
                head->qlen += list->qlen;
        }
}

/**
 *      skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 *
 *      Each of the lists is a queue.
 *      The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
                                              struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
                head->qlen += list->qlen;
                __skb_queue_head_init(list);
        }
}
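
/*
 * Example: draining a shared queue into a private on-stack list in one
 * locked operation, then processing it without the lock held. A minimal
 * sketch; "queue" is a hypothetical locked producer queue:
 *
 *      struct sk_buff_head tmp;
 *      struct sk_buff *skb;
 *
 *      __skb_queue_head_init(&tmp);
 *      spin_lock_bh(&queue->lock);
 *      skb_queue_splice_init(queue, &tmp);     // queue is now empty
 *      spin_unlock_bh(&queue->lock);
 *      while ((skb = __skb_dequeue(&tmp)) != NULL)
 *              consume_skb(skb);
 */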

/**
 *      __skb_queue_after - queue a buffer after a given buffer in the list
 *      @list: list to use
 *      @prev: place after this buffer
 *      @newsk: buffer to queue
 *
 *      Queue a buffer in the middle of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
                                     struct sk_buff *prev,
                                     struct sk_buff *newsk)
{
        __skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
                       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
                                      struct sk_buff *next,
                                      struct sk_buff *newsk)
{
        __skb_insert(newsk, next->prev, next, list);
}

/**
 *      __skb_queue_head - queue a buffer at the list head
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the start of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
{
        __skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *      __skb_queue_tail - queue a buffer at the list tail
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the end of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
                                   struct sk_buff *newsk)
{
        __skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void        skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
        struct sk_buff *next, *prev;

        list->qlen--;
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
        next->prev = prev;
        prev->next = next;
}

/**
 *      __skb_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. This function does not take any locks
 *      so must be used with appropriate locks held only. The head item is
 *      returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}

/**
 *      __skb_dequeue_tail - remove from the tail of the queue
 *      @list: list to dequeue from
 *
 *      Remove the tail of the list. This function does not take any locks
 *      so must be used with appropriate locks held only. The tail item is
 *      returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek_tail(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}
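
/*
 * Example: the basic FIFO pattern using the locked wrappers declared
 * above (a minimal sketch; "rxq" is a hypothetical driver-owned queue):
 *
 *      skb_queue_tail(&rxq, skb);                      // producer side
 *      ...
 *      while ((skb = skb_dequeue(&rxq)) != NULL)       // consumer side
 *              netif_rx(skb);
 */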

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
        return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
        return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
        int i, len = 0;

        for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
                len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
        return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
                                        struct page *page, int off, int size)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        /*
         * Propagate page->pfmemalloc to the skb if we can. The problem is
         * that not all callers have unique ownership of the page. If
         * pfmemalloc is set, we check the mapping as a mapping implies
         * page->index is set (index and pfmemalloc share space).
         * If it's a valid mapping, we cannot use page->pfmemalloc but we
         * do not lose pfmemalloc information as the pages would not be
         * allocated using __GFP_MEMALLOC.
         */
        if (page->pfmemalloc && !page->mapping)
                skb->pfmemalloc = true;
        frag->page.p              = page;
        frag->page_offset         = off;
        skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
                                      struct page *page, int off, int size)
{
        __skb_fill_page_desc(skb, i, page, off, size);
        skb_shinfo(skb)->nr_frags = i + 1;
}
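
/*
 * Example: attaching a page as the next fragment of an skb and updating
 * the length accounting by hand. A minimal sketch under the assumption
 * that the caller already holds a reference on "page" and that a whole
 * page of truesize is charged; skb_add_rx_frag() below does this for you:
 *
 *      skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, size);
 *      skb->len      += size;
 *      skb->data_len += size;
 *      skb->truesize += PAGE_SIZE;
 */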

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
                            int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
        return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
        skb_reset_tail_pointer(skb);
        skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
        return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
        skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *      Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
        return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data -= len;
        skb->len  += len;
        return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
        skb->len -= len;
        BUG_ON(skb->len < skb->data_len);
        return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
        if (len > skb_headlen(skb) &&
            !__pskb_pull_tail(skb, len - skb_headlen(skb)))
                return NULL;
        skb->len -= len;
        return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
        if (likely(len <= skb_headlen(skb)))
                return 1;
        if (unlikely(len > skb->len))
                return 0;
        return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
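
/*
 * Example: the standard guard before reading a header that may live in
 * fragments: make sure the bytes we are about to dereference are linear.
 * A minimal sketch; the "drop" label is hypothetical caller code, and
 * ip_hdr() comes from <net/ip.h>:
 *
 *      if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *              goto drop;              // too short, or pull failed
 *      iph = ip_hdr(skb);              // now safe to read
 */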

/**
 *      skb_headroom - bytes at buffer head
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
        return skb->data - skb->head;
}

/**
 *      skb_tailroom - bytes at buffer end
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *      skb_availroom - bytes at buffer end
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the tail of an sk_buff
 *      allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
}

/**
 *      skb_reserve - adjust headroom
 *      @skb: buffer to alter
 *      @len: bytes to move
 *
 *      Increase the headroom of an empty &sk_buff by reducing the tail
 *      room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
        skb->data += len;
        skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
        skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
        return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
        skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
                                            const int offset)
{
        skb_reset_transport_header(skb);
        skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
        skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
        skb_reset_network_header(skb);
        skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
        return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb_reset_mac_header(skb);
        skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
        return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
        skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
                                            const int offset)
{
        skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
        skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
        skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
        return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
        if (skb_mac_header_was_set(skb)) {
                const unsigned char *old_mac = skb_mac_header(skb);

                skb_set_mac_header(skb, -skb->mac_len);
                memmove(skb_mac_header(skb), old_mac, skb->mac_len);
        }
}
1555
1556static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1557{
1558        return skb->csum_start - skb_headroom(skb);
1559}
1560
1561static inline int skb_transport_offset(const struct sk_buff *skb)
1562{
1563        return skb_transport_header(skb) - skb->data;
1564}
1565
1566static inline u32 skb_network_header_len(const struct sk_buff *skb)
1567{
1568        return skb->transport_header - skb->network_header;
1569}
1570
1571static inline int skb_network_offset(const struct sk_buff *skb)
1572{
1573        return skb_network_header(skb) - skb->data;
1574}
1575
1576static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1577{
1578        return pskb_may_pull(skb, skb_network_offset(skb) + len);
1579}
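
/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical protocol handler that uses pskb_network_may_pull() to
 * guarantee linear access to the IPv4 header before dereferencing it.
 * struct iphdr is assumed to come from <linux/ip.h>.
 */
static inline int example_read_ip_ttl(struct sk_buff *skb)
{
        const struct iphdr *iph;

        /* Make sure at least the basic 20-byte header is linear. */
        if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
                return -EINVAL;

        iph = (const struct iphdr *)skb_network_header(skb);
        return iph->ttl;
}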

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies; it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * (An illustrative allocation sketch follows netdev_alloc_skb_ip_align()
 * further below.)
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN    2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to the cacheline size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus() for example only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD     max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (unlikely(skb_is_nonlinear(skb))) {
                WARN_ON(1);
                return;
        }
        skb->len = len;
        skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->data_len)
                return ___pskb_trim(skb, len);
        __skb_trim(skb, len);
        return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
        return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      This is identical to pskb_trim except that the caller knows that
 *      the skb is not cloned so we should never get an error due to out-
 *      of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
        int err = pskb_trim(skb, len);
        BUG_ON(err);
}
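
/*
 * Example (illustrative sketch): trimming trailing link-layer padding
 * once the real datagram length is known. The datagram_len parameter is
 * hypothetical; on receive paths carrying CHECKSUM_COMPLETE, prefer
 * pskb_trim_rcsum(), defined further below.
 */
static inline int example_trim_padding(struct sk_buff *skb,
                                       unsigned int datagram_len)
{
        /* pskb_trim() is a no-op when the skb is already short enough. */
        return pskb_trim(skb, datagram_len);
}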

/**
 *      skb_orphan - orphan a buffer
 *      @skb: buffer to orphan
 *
 *      If a buffer currently has an owner then we call the owner's
 *      destructor function and make the @skb unowned. The buffer continues
 *      to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
        if (skb->destructor)
                skb->destructor(skb);
        skb->destructor = NULL;
        skb->sk         = NULL;
}

/**
 *      skb_orphan_frags - orphan the frags contained in a buffer
 *      @skb: buffer to orphan frags from
 *      @gfp_mask: allocation mask for replacement pages
 *
 *      For each frag in the SKB which needs a destructor (i.e. has an
 *      owner) create a copy of that frag and release the original
 *      page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
        if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                return 0;
        return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *      __skb_queue_purge - empty a list
 *      @list: list to empty
 *
 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
 *      the list and one reference dropped. This function does not take the
 *      list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb = __skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}
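
/*
 * Example (illustrative sketch): draining a private queue. A caller in
 * process context would normally use the locked skb_queue_purge(); the
 * unlocked __skb_queue_purge() is only safe with the queue lock held,
 * as in this hypothetical helper.
 */
static inline void example_drain_queue(struct sk_buff_head *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __skb_queue_purge(q);           /* lock already held */
        spin_unlock_irqrestore(&q->lock, flags);
}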

extern void *netdev_alloc_frag(unsigned int fragsz);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                                          unsigned int length,
                                          gfp_t gfp_mask);

/**
 *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built-in space. The built-in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
                                               unsigned int length)
{
        return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                              gfp_t gfp_mask)
{
        return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
        return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
                unsigned int length, gfp_t gfp)
{
        struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

        if (NET_IP_ALIGN && skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
                unsigned int length)
{
        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
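
/*
 * Example (illustrative sketch): the receive-buffer pattern from the
 * NET_IP_ALIGN comment above, as a hypothetical driver refill routine.
 * It is equivalent to calling netdev_alloc_skb_ip_align() directly.
 */
static inline struct sk_buff *example_rx_refill(struct net_device *dev,
                                                unsigned int buf_len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);

        if (!skb)
                return NULL;
        /* Shift so the IP header behind the 14-byte MAC header is aligned. */
        skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}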

/**
 *      __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
 *      @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
 *      @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 *      @order: size of the allocation
 *
 *      Allocate a new page.
 *
 *      %NULL is returned if there is no free memory.
 */
static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
                                              struct sk_buff *skb,
                                              unsigned int order)
{
        struct page *page;

        gfp_mask |= __GFP_COLD;

        if (!(gfp_mask & __GFP_NOMEMALLOC))
                gfp_mask |= __GFP_MEMALLOC;

        page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
        if (skb && page && page->pfmemalloc)
                skb->pfmemalloc = true;

        return page;
}

/**
 *      __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
 *      @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
 *      @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 *
 *      Allocate a new page.
 *
 *      %NULL is returned if there is no free memory.
 */
static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
                                             struct sk_buff *skb)
{
        return __skb_alloc_pages(gfp_mask, skb, 0);
}

/**
 *      skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *      @page: The page that was allocated from __skb_alloc_page
 *      @skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
                                             struct sk_buff *skb)
{
        if (page && page->pfmemalloc)
                skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
        return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
        get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
        __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
        put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
        __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
        return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
        void *ptr = page_address(skb_frag_page(frag));
        if (unlikely(!ptr))
                return NULL;

        return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
        frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
                                     struct page *page)
{
        __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%PCI_DMA_*)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
                                          const skb_frag_t *frag,
                                          size_t offset, size_t size,
                                          enum dma_data_direction dir)
{
        return dma_map_page(dev, skb_frag_page(frag),
                            frag->page_offset + offset, size, dir);
}
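
/*
 * Example (illustrative sketch): mapping every paged fragment of an skb
 * for transmit DMA. Descriptor writes are elided, and real driver code
 * would also have to unmap already-mapped fragments on failure.
 */
static inline int example_map_frags(struct device *dev, struct sk_buff *skb)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
                                                   skb_frag_size(frag),
                                                   DMA_TO_DEVICE);

                if (dma_mapping_error(dev, addr))
                        return -ENOMEM;
                /* ... store addr/len in the hardware descriptor ... */
        }
        return 0;
}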

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
                                        gfp_t gfp_mask)
{
        return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 *      skb_clone_writable - is the header of a clone writable
 *      @skb: buffer to check
 *      @len: length up to which to write
 *
 *      Returns true if modifying the header part of the cloned buffer
 *      does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
        return !skb_header_cloned(skb) &&
               skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
                            int cloned)
{
        int delta = 0;

        if (headroom > skb_headroom(skb))
                delta = headroom - skb_headroom(skb);

        if (delta || cloned)
                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
                                        GFP_ATOMIC);
        return 0;
}

/**
 *      skb_cow - copy header of skb when it is required
 *      @skb: buffer to cow
 *      @headroom: needed headroom
 *
 *      If the skb passed lacks sufficient headroom or its data part
 *      is shared, data is reallocated. If reallocation fails, an error
 *      is returned and original skb is not changed.
 *
 *      The result is skb with writable area skb->head...skb->tail
 *      and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
        return __skb_cow(skb, headroom, skb_cloned(skb));
}
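
/*
 * Example (illustrative sketch): prepending a hypothetical 4-byte tag.
 * skb_cow() makes the whole data area private and writable first; when
 * only headers are written, skb_cow_head() below is the cheaper choice.
 */
static inline int example_push_tag(struct sk_buff *skb, __be32 tag)
{
        int err = skb_cow(skb, sizeof(tag));

        if (err)
                return err;
        memcpy(skb_push(skb, sizeof(tag)), &tag, sizeof(tag));
        return 0;
}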

/**
 *      skb_cow_head - skb_cow but only making the head writable
 *      @skb: buffer to cow
 *      @headroom: needed headroom
 *
 *      This function is identical to skb_cow except that we replace the
 *      skb_cloned check by skb_header_cloned.  It should be used when
 *      you only need to push on some header and do not need to modify
 *      the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
        return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *      skb_padto       - pad an skbuff up to a minimal size
 *      @skb: buffer to pad
 *      @len: minimal length
 *
 *      Pads up a buffer to ensure the trailing bytes exist and are
 *      blanked. If the buffer already contains sufficient data it
 *      is untouched. Otherwise it is extended. Returns zero on
 *      success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
        unsigned int size = skb->len;
        if (likely(size >= len))
                return 0;
        return skb_pad(skb, len - size);
}
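
/*
 * Example (illustrative sketch): hardware that cannot pad short frames
 * needs them padded to the 60-byte ethernet minimum before transmit.
 * ETH_ZLEN and NETDEV_TX_OK are assumed from <linux/if_ether.h> and
 * <linux/netdevice.h>; note the skb is already freed when skb_padto()
 * fails.
 */
static inline int example_pad_short_frame(struct sk_buff *skb)
{
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb was freed, nothing to do */
        /* ... hand the >= ETH_ZLEN frame to the hardware ... */
        return NETDEV_TX_OK;
}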

static inline int skb_add_data(struct sk_buff *skb,
                               char __user *from, int copy)
{
        const int off = skb->len;

        if (skb->ip_summed == CHECKSUM_NONE) {
                int err = 0;
                __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
                                                            copy, 0, &err);
                if (!err) {
                        skb->csum = csum_block_add(skb->csum, csum, off);
                        return 0;
                }
        } else if (!copy_from_user(skb_put(skb, copy), from, copy))
                return 0;

        __skb_trim(skb, off);
        return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
                                    const struct page *page, int off)
{
        if (i) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

                return page == skb_frag_page(frag) &&
                       off == frag->page_offset + skb_frag_size(frag);
        }
        return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
        return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *      skb_linearize - convert paged skb to linear one
 *      @skb: buffer to linearize
 *
 *      If there is no free memory -ENOMEM is returned, otherwise zero
 *      is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *      skb_linearize_cow - make sure skb is linear and writable
 *      @skb: buffer to process
 *
 *      If there is no free memory -ENOMEM is returned, otherwise zero
 *      is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) || skb_cloned(skb) ?
               __skb_linearize(skb) : 0;
}

/**
 *      skb_postpull_rcsum - update checksum for received skb after pull
 *      @skb: buffer to update
 *      @start: start of data before pull
 *      @len: length of data pulled
 *
 *      After doing a pull on a received packet, you need to call this to
 *      update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *      CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
{
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
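
/*
 * Example (illustrative sketch): stripping a hypothetical 8-byte
 * encapsulation header on receive while keeping a CHECKSUM_COMPLETE
 * value coherent. skb_pull_rcsum(), declared below, combines the two
 * steps.
 */
static inline void example_decap_8(struct sk_buff *skb)
{
        const void *start = skb->data;

        skb_pull(skb, 8);               /* drop the outer header */
        skb_postpull_rcsum(skb, start, 8);
}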

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *      pskb_trim_rcsum - trim received skb and update checksum
 *      @skb: buffer to trim
 *      @len: new length
 *
 *      This is exactly the same as pskb_trim except that it ensures the
 *      checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
        if (likely(len >= skb->len))
                return 0;
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;
        return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
                for (skb = (queue)->next;                                       \
                     skb != (struct sk_buff *)(queue);                          \
                     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)                                    \
                for (skb = (queue)->next, tmp = skb->next;                      \
                     skb != (struct sk_buff *)(queue);                          \
                     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)                                         \
                for (; skb != (struct sk_buff *)(queue);                        \
                     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)                               \
                for (tmp = skb->next;                                           \
                     skb != (struct sk_buff *)(queue);                          \
                     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
                for (skb = (queue)->prev;                                       \
                     skb != (struct sk_buff *)(queue);                          \
                     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)                            \
                for (skb = (queue)->prev, tmp = skb->prev;                      \
                     skb != (struct sk_buff *)(queue);                          \
                     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)                       \
                for (tmp = skb->prev;                                           \
                     skb != (struct sk_buff *)(queue);                          \
                     skb = tmp, tmp = skb->prev)
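
/*
 * Example (illustrative sketch): totalling queued bytes with
 * skb_queue_walk(). The caller must hold the queue lock (or otherwise
 * own the list) across the walk, since the macro is not safe against
 * concurrent unlinking.
 */
static inline unsigned int example_queue_bytes(struct sk_buff_head *q)
{
        struct sk_buff *skb;
        unsigned int bytes = 0;

        skb_queue_walk(q, skb)
                bytes += skb->len;
        return bytes;
}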

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
        skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
        frag->next = skb_shinfo(skb)->frag_list;
        skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)       \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                           int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
                                         int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
                                     struct poll_table_struct *wait);
extern int             skb_copy_datagram_iovec(const struct sk_buff *from,
                                               int offset, struct iovec *to,
                                               int size);
extern int             skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
                                                        int hlen,
                                                        struct iovec *iov);
extern int             skb_copy_datagram_from_iovec(struct sk_buff *skb,
                                                    int offset,
                                                    const struct iovec *from,
                                                    int from_offset,
                                                    int len);
extern int             skb_copy_datagram_const_iovec(const struct sk_buff *from,
                                                     int offset,
                                                     const struct iovec *to,
                                                     int to_offset,
                                                     int size);
extern void            skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void            skb_free_datagram_locked(struct sock *sk,
                                                struct sk_buff *skb);
extern int             skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
                                         unsigned int flags);
extern __wsum          skb_checksum(const struct sk_buff *skb, int offset,
                                    int len, __wsum csum);
extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
                                     void *to, int len);
extern int             skb_store_bits(struct sk_buff *skb, int offset,
                                      const void *from, int len);
extern __wsum          skb_copy_and_csum_bits(const struct sk_buff *skb,
                                              int offset, u8 *to, int len,
                                              __wsum csum);
extern int             skb_splice_bits(struct sk_buff *skb,
                                                unsigned int offset,
                                                struct pipe_inode_info *pipe,
                                                unsigned int len,
                                                unsigned int flags);
extern void            skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void            skb_split(struct sk_buff *skb,
                                 struct sk_buff *skb1, const u32 len);
extern int             skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
                                 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
                                   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
{
        int hlen = skb_headlen(skb);

        if (hlen - offset >= len)
                return skb->data + offset;

        if (skb_copy_bits(skb, offset, buffer, len) < 0)
                return NULL;

        return buffer;
}
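
/*
 * Example (illustrative sketch): reading TCP ports whether or not the
 * transport header is linear, using the on-stack buffer that
 * skb_header_pointer() may copy into. struct tcphdr is assumed from
 * <linux/tcp.h>.
 */
static inline int example_tcp_dest_port(const struct sk_buff *skb)
{
        struct tcphdr _th;
        const struct tcphdr *th;

        th = skb_header_pointer(skb, skb_transport_offset(skb),
                                sizeof(_th), &_th);
        if (!th)
                return -1;
        return ntohs(th->dest);
}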

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
                                             void *to,
                                             const unsigned int len)
{
        memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
                                                    const int offset, void *to,
                                                    const unsigned int len)
{
        memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
                                           const void *from,
                                           const unsigned int len)
{
        memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
                                                  const int offset,
                                                  const void *from,
                                                  const unsigned int len)
{
        memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
        return skb->tstamp;
}

/**
 *      skb_get_timestamp - get timestamp from a skb
 *      @skb: skb to get stamp from
 *      @stamp: pointer to struct timeval to store stamp in
 *
 *      Timestamps are stored in the skb as offsets to a base timestamp.
 *      This function converts the offset back to a struct timeval and stores
 *      it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
                                     struct timeval *stamp)
{
        *stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
                                       struct timespec *stamp)
{
        *stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
        skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
        return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
        return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
        return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
                               struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:   the original outgoing packet
 * @hwtstamps:  hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
                        struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
            !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
                skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC drivers should call this function in their hard_start_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
        skb_clone_tx_timestamp(skb);
        sw_tx_timestamp(skb);
}
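
/*
 * Example (illustrative sketch): the call placement described above,
 * inside a hypothetical ndo_start_xmit() implementation; mapping and
 * doorbell details are elided. NETDEV_TX_OK is assumed from
 * <linux/netdevice.h>.
 */
static inline int example_start_xmit(struct sk_buff *skb,
                                     struct net_device *dev)
{
        /* ... map buffers and fill the tx descriptor ... */
        skb_tx_timestamp(skb);  /* last touch before the hardware owns it */
        /* ... ring the doorbell ... */
        return NETDEV_TX_OK;
}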

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
        return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *      skb_checksum_complete - Calculate checksum of an entire packet
 *      @skb: packet to process
 *
 *      This function calculates the checksum over the entire packet plus
 *      the value of skb->csum.  The latter can be used to supply the
 *      checksum of a pseudo header as used by TCP/UDP.  It returns the
 *      checksum.
 *
 *      For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *      this function can be used to verify the checksum on received
 *      packets.  In that case the function should return zero if the
 *      checksum is correct.  In particular, this function will return zero
 *      if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *      hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
        return skb_csum_unnecessary(skb) ?
               0 : __skb_checksum_complete(skb);
}
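
/*
 * Example (illustrative sketch): verifying a received packet whose
 * protocol carries a complete checksum. A zero result means the
 * checksum is good, including the case where the hardware already
 * vouched for it via CHECKSUM_UNNECESSARY.
 */
static inline bool example_csum_ok(struct sk_buff *skb)
{
        return skb_checksum_complete(skb) == 0;
}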

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
        if (nfct && atomic_dec_and_test(&nfct->use))
                nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
        if (nfct)
                atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
        if (skb)
                atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
        if (skb)
                kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
        if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
                kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
        if (nf_bridge)
                atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb->nfct);
        skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        nf_conntrack_put_reasm(skb->nfct_reasm);
        skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
        skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        dst->nfct = src->nfct;
        nf_conntrack_get(src->nfct);
        dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        dst->nfct_reasm = src->nfct_reasm;
        nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        dst->nf_bridge  = src->nf_bridge;
        nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(dst->nf_bridge);
#endif
        __nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
        to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
        skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
        skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
        return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
        to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
        skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
        return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
        return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
                         const struct sk_buff *skb,
                         unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
        return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
        return NULL;
}
#endif

static inline bool skb_is_gso(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
        /* LRO sets gso_size but not gso_type, whereas if GSO is really
         * wanted then gso_type will be set. */
        const struct skb_shared_info *shinfo = skb_shinfo(skb);

        if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
            unlikely(shinfo->gso_type == 0)) {
                __skb_warn_lro_forwarding(skb);
                return true;
        }
        return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
        /* Unfortunately we don't support this one.  Any brave souls? */
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
        return !skb->head_frag || skb_cloned(skb);
}
#endif  /* __KERNEL__ */
#endif  /* _LINUX_SKBUFF_H */