linux/net/core/skbuff.c
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copies the arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                              struct pipe_buffer *buf)
{
        get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
{
        return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = sock_pipe_buf_release,
        .steal = sock_pipe_buf_steal,
        .get = sock_pipe_buf_get,
};

/*
 *      Keep out-of-line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always
 *      reliable.
 */

/**
 *      skb_over_panic  -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 __func__, here, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

/**
 *      skb_under_panic -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 __func__, here, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}


/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If they are
 * and the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC
 * reserves may be used. Otherwise, the packet data may be discarded until
 * enough memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
                        bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation; when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !gfp_pfmemalloc_allowed(flags))
                goto out;

        /* Try again, but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}

/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 *      'private' fields and also do memory statistics to find all the
 *      [BEEP] leaks.
 */

/**
 *      __alloc_skb     -       allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @flags: If SKB_ALLOC_FCLONE is set, allocate from the fclone cache
 *              instead of the head cache and allocate a cloned (child) skb.
 *              If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *              allocations in case the data is required for writeback.
 *      @node: numa node to allocate memory on
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of at least @size bytes. The object has a reference count
 *      of one. Returns the buffer on success, or %NULL on failure.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;

        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of the allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->mac_header = ~0U;
#endif

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                kmemcheck_annotate_bitfield(child, flags1);
                kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
                child->pfmemalloc = pfmemalloc;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
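
/*
 * Illustrative sketch, not part of the original file: the usual calling
 * pattern for the allocator above is the alloc_skb() wrapper from
 * <linux/skbuff.h>, followed by skb_reserve() for headroom and skb_put()
 * to append data. The function name and the payload are hypothetical.
 */
static struct sk_buff *example_alloc_and_fill(const void *payload,
                                              unsigned int len)
{
        struct sk_buff *skb;

        /* Ask for headroom plus payload; the tail room holds the data. */
        skb = alloc_skb(NET_SKB_PAD + len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, NET_SKB_PAD);  /* headroom for later skb_push() */
        memcpy(skb_put(skb, len), payload, len);
        return skb;
}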

/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. The caller provides the space holding the head
 * and the skb_shared_info. @data must have been allocated by kmalloc().
 * Returns the new skb buffer on success, or %NULL on failure, in which
 * case @data is not freed.
 * Notes :
 *  Before I/O, the driver allocates only the data buffer, where the NIC
 *  puts the incoming frame. The driver should add room at the head
 *  (NET_SKB_PAD) and MUST add room at the tail
 *  (SKB_DATA_ALIGN(skb_shared_info)).
 *  After I/O, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        unsigned int size = frag_size ? : ksize(data);

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (!skb)
                return NULL;

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
        skb->head_frag = frag_size != 0;
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->mac_header = ~0U;
#endif

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        return skb;
}
EXPORT_SYMBOL(build_skb);
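
/*
 * Illustrative sketch, not part of the original file: wrapping a
 * kmalloc()ed buffer with build_skb(). With a kmalloc()ed head,
 * @frag_size must be 0, and the head must reserve room for the
 * struct skb_shared_info at its end. The function name is hypothetical.
 */
static struct sk_buff *example_wrap_buffer(unsigned int len)
{
        struct sk_buff *skb;
        void *data;

        data = kmalloc(SKB_DATA_ALIGN(len) +
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                       GFP_ATOMIC);
        if (!data)
                return NULL;

        skb = build_skb(data, 0);       /* 0: head was kmalloced */
        if (!skb) {
                kfree(data);            /* build_skb() does not free @data */
                return NULL;
        }
        return skb;
}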

struct netdev_alloc_cache {
        struct page_frag        frag;
        /* we maintain a pagecount bias, so that we don't dirty the cache
         * line containing page->_count every time we allocate a fragment.
         */
        unsigned int            pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS    NETDEV_FRAG_PAGE_MAX_SIZE

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
        struct netdev_alloc_cache *nc;
        void *data = NULL;
        int order;
        unsigned long flags;

        local_irq_save(flags);
        nc = &__get_cpu_var(netdev_alloc_cache);
        if (unlikely(!nc->frag.page)) {
refill:
                for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
                        gfp_t gfp = gfp_mask;

                        if (order)
                                gfp |= __GFP_COMP | __GFP_NOWARN;
                        nc->frag.page = alloc_pages(gfp, order);
                        if (likely(nc->frag.page))
                                break;
                        if (--order < 0)
                                goto end;
                }
                nc->frag.size = PAGE_SIZE << order;
recycle:
                atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
                nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
                nc->frag.offset = 0;
        }

        if (nc->frag.offset + fragsz > nc->frag.size) {
                /* avoid unnecessary locked operations if possible */
                if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
                    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
                        goto recycle;
                goto refill;
        }

        data = page_address(nc->frag.page) + nc->frag.offset;
        nc->frag.offset += fragsz;
        nc->pagecnt_bias--;
end:
        local_irq_restore(flags);
        return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a page fragment for a receive buffer.
 * Uses %GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
        return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                                   unsigned int length, gfp_t gfp_mask)
{
        struct sk_buff *skb = NULL;
        unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
                void *data;

                if (sk_memalloc_socks())
                        gfp_mask |= __GFP_MEMALLOC;

                data = __netdev_alloc_frag(fragsz, gfp_mask);

                if (likely(data)) {
                        skb = build_skb(data, fragsz);
                        if (unlikely(!skb))
                                put_page(virt_to_head_page(data));
                }
        } else {
                skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
                                  SKB_ALLOC_RX, NUMA_NO_NODE);
        }
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
        }
        return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
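
/*
 * Illustrative sketch, not part of the original file: the common RX-path
 * pattern in a driver. netdev_alloc_skb() (the %GFP_ATOMIC wrapper around
 * __netdev_alloc_skb() from <linux/skbuff.h>) already accounts for
 * NET_SKB_PAD, so the caller asks only for the frame length. The function
 * name is hypothetical; eth_type_trans() comes from <linux/etherdevice.h>.
 */
static int example_rx_one(struct net_device *dev, const void *frame,
                          unsigned int len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, len);
        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), frame, len);
        skb->protocol = eth_type_trans(skb, dev);
        return netif_rx(skb);
}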

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
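
/*
 * Illustrative sketch, not part of the original file: attaching a received
 * page to a non-linear skb. @truesize should reflect the real memory cost
 * of the fragment (here a whole page). The function name is hypothetical.
 */
static void example_attach_page(struct sk_buff *skb, struct page *page,
                                unsigned int len)
{
        /* The new fragment goes after the ones already present. */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len,
                        PAGE_SIZE);
}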

static void skb_drop_list(struct sk_buff **listp)
{
        struct sk_buff *list = *listp;

        *listp = NULL;

        do {
                struct sk_buff *this = list;
                list = list->next;
                kfree_skb(this);
        } while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
        if (skb->head_frag)
                put_page(virt_to_head_page(skb->head));
        else
                kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                skb_frag_unref(skb, i);
                }

                /*
                 * If the skb buffer is from userspace, we need to notify
                 * the caller that the lower device DMA is done.
                 */
                if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
                        struct ubuf_info *uarg;

                        uarg = skb_shinfo(skb)->destructor_arg;
                        if (uarg->callback)
                                uarg->callback(uarg);
                }

                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);

                skb_free_head(skb);
        }
}

/*
 *      Free an skbuff's memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        }
}

static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: Is this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        skb_release_data(skb);
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *      kfree_skb - free an sk_buff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *      consume_skb - free an skbuff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero. Functions identically to kfree_skb(), but kfree_skb()
 *      assumes that the frame is being dropped after a failure and notes
 *      that.
 */
void consume_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_consume_skb(skb);
        __kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
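
/*
 * Illustrative sketch, not part of the original file: the two free paths
 * differ only in what they report to tracing (drop monitors hook
 * trace_kfree_skb()). A hypothetical TX-completion handler:
 */
static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
{
        if (tx_ok)
                consume_skb(skb);       /* normal end of life, not a drop */
        else
                kfree_skb(skb);         /* error path: report as a drop */
}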

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        new->tstamp             = old->tstamp;
        new->dev                = old->dev;
        new->transport_header   = old->transport_header;
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        skb_dst_copy(new, old);
        new->rxhash             = old->rxhash;
        new->ooo_okay           = old->ooo_okay;
        new->l4_rxhash          = old->l4_rxhash;
        new->no_fcs             = old->no_fcs;
#ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
#endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
        new->local_df           = old->local_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
        new->priority           = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
        new->ipvs_property      = old->ipvs_property;
#endif
        new->pfmemalloc         = old->pfmemalloc;
        new->protocol           = old->protocol;
        new->mark               = old->mark;
        new->skb_iif            = old->skb_iif;
        __nf_copy(new, old);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        new->nf_trace           = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
        new->tc_index           = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
        new->tc_verd            = old->tc_verd;
#endif
#endif
        new->vlan_tci           = old->vlan_tci;

        skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->sk = NULL;
        __copy_skb_header(n, skb);

        C(len);
        C(data_len);
        C(mac_len);
        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
        n->cloned = 1;
        n->nohdr = 0;
        n->destructor = NULL;
        C(tail);
        C(end);
        C(head);
        C(head_frag);
        C(data);
        C(truesize);
        atomic_set(&n->users, 1);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
#undef C
}

/**
 *      skb_morph       -       morph one skb into another
 *      @dst: the skb to receive the contents
 *      @src: the skb to supply the contents
 *
 *      This is identical to skb_clone except that the target skb is
 *      supplied by the user.
 *
 *      The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
        skb_release_all(dst);
        return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *      skb_copy_ubufs  -       copy userspace skb frags buffers to kernel
 *      @skb: the skb to modify
 *      @gfp_mask: allocation priority
 *
 *      This must be called on SKBTX_DEV_ZEROCOPY skbs.
 *      It will copy all frags into kernel memory and drop the reference
 *      to the userspace pages.
 *
 *      If this function is called from an interrupt, @gfp_mask must be
 *      %GFP_ATOMIC.
 *
 *      Returns 0 on success or a negative error code on failure
 *      to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
        int i;
        int num_frags = skb_shinfo(skb)->nr_frags;
        struct page *page, *head = NULL;
        struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

        for (i = 0; i < num_frags; i++) {
                u8 *vaddr;
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                page = alloc_page(gfp_mask);
                if (!page) {
                        while (head) {
                                struct page *next = (struct page *)head->private;
                                put_page(head);
                                head = next;
                        }
                        return -ENOMEM;
                }
                vaddr = kmap_atomic(skb_frag_page(f));
                memcpy(page_address(page),
                       vaddr + f->page_offset, skb_frag_size(f));
                kunmap_atomic(vaddr);
                page->private = (unsigned long)head;
                head = page;
        }

        /* skb frags release userspace buffers */
        for (i = 0; i < num_frags; i++)
                skb_frag_unref(skb, i);

        uarg->callback(uarg);

        /* skb frags point to kernel buffers */
        for (i = num_frags - 1; i >= 0; i--) {
                __skb_fill_page_desc(skb, i, head, 0,
                                     skb_shinfo(skb)->frags[i].size);
                head = (struct page *)head->private;
        }

        skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
        return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *      skb_clone       -       duplicate an sk_buff
 *      @skb: buffer to clone
 *      @gfp_mask: allocation priority
 *
 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *      copies share the same packet data but not structure. The new
 *      buffer has a reference count of 1. If the allocation fails the
 *      function returns %NULL otherwise the new buffer is returned.
 *
 *      If this function is called from an interrupt, @gfp_mask must be
 *      %GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
        struct sk_buff *n;

        if (skb_orphan_frags(skb, gfp_mask))
                return NULL;

        n = skb + 1;
        if (skb->fclone == SKB_FCLONE_ORIG &&
            n->fclone == SKB_FCLONE_UNAVAILABLE) {
                atomic_t *fclone_ref = (atomic_t *) (n + 1);
                n->fclone = SKB_FCLONE_CLONE;
                atomic_inc(fclone_ref);
        } else {
                if (skb_pfmemalloc(skb))
                        gfp_mask |= __GFP_MEMALLOC;

                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;

                kmemcheck_annotate_bitfield(n, flags1);
                kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }

        return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
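
/*
 * Illustrative sketch, not part of the original file: a clone shares the
 * packet data (the shinfo dataref is bumped) but has its own struct
 * sk_buff, so each copy can be queued and freed independently. Writing
 * through either copy first requires skb_copy()/pskb_copy(). The function
 * name is hypothetical.
 */
static int example_tee(struct sk_buff *skb)
{
        struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

        if (!copy)
                return -ENOMEM;

        /* The clone could be handed to a second consumer; drop it here. */
        kfree_skb(copy);
        return 0;
}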

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;
#endif

        __copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /* {transport,network,mac}_header are relative to skb->head */
        new->transport_header += offset;
        new->network_header   += offset;
        if (skb_mac_header_was_set(new))
                new->mac_header       += offset;
#endif
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
        if (skb_pfmemalloc(skb))
                return SKB_ALLOC_RX;
        return 0;
}

/**
 *      skb_copy        -       create private copy of an sk_buff
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data. This is used when the
 *      caller wishes to modify the data and needs a private copy of the
 *      data to alter. Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      As a by-product this function converts a non-linear &sk_buff to a
 *      linear one, so that the &sk_buff becomes completely private and the
 *      caller is allowed to modify all the data of the returned buffer. This
 *      means that this function is not recommended for use in circumstances
 *      when only the header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
        int headerlen = skb_headroom(skb);
        unsigned int size = skb_end_offset(skb) + skb->data_len;
        struct sk_buff *n = __alloc_skb(size, gfp_mask,
                                        skb_alloc_rx_flag(skb), NUMA_NO_NODE);

        if (!n)
                return NULL;

        /* Set the data pointer */
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();

        copy_skb_header(n, skb);
        return n;
}
EXPORT_SYMBOL(skb_copy);
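
/*
 * Illustrative sketch, not part of the original file: taking a fully
 * private, linear copy before rewriting payload bytes of a possibly
 * cloned skb. The function name is hypothetical.
 */
static struct sk_buff *example_private_copy(struct sk_buff *skb)
{
        struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

        if (!copy)
                return NULL;

        consume_skb(skb);               /* done with the shared original */
        memset(copy->data, 0, skb_headlen(copy)); /* now safe to modify */
        return copy;
}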

/**
 *      __pskb_copy     -       create copy of an sk_buff with private head.
 *      @skb: buffer to copy
 *      @headroom: headroom of new skb
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and part of its data, located
 *      in the header. Fragmented data remains shared. This is used when
 *      the caller wishes to modify only the header of an &sk_buff and
 *      needs a private copy of the header to alter. Returns %NULL on
 *      failure or the pointer to the buffer on success.
 *      The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
        unsigned int size = skb_headlen(skb) + headroom;
        struct sk_buff *n = __alloc_skb(size, gfp_mask,
                                        skb_alloc_rx_flag(skb), NUMA_NO_NODE);

        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, headroom);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        skb_copy_from_linear_data(skb, n->data, n->len);

        n->truesize += skb->data_len;
        n->data_len  = skb->data_len;
        n->len       = skb->len;

        if (skb_shinfo(skb)->nr_frags) {
                int i;

                if (skb_orphan_frags(skb, gfp_mask)) {
                        kfree_skb(n);
                        n = NULL;
                        goto out;
                }
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
                        skb_frag_ref(skb, i);
                }
                skb_shinfo(n)->nr_frags = i;
        }

        if (skb_has_frag_list(skb)) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }

        copy_skb_header(n, skb);
out:
        return n;
}
EXPORT_SYMBOL(__pskb_copy);

/**
 *      pskb_expand_head - reallocate header of &sk_buff
 *      @skb: buffer to reallocate
 *      @nhead: room to add at head
 *      @ntail: room to add at tail
 *      @gfp_mask: allocation priority
 *
 *      Expands (or creates an identical copy, if @nhead and @ntail are
 *      zero) the header of the skb. The &sk_buff itself is not changed.
 *      The &sk_buff MUST have a reference count of 1. Returns zero in the
 *      case of success, or a negative error code if the expansion failed.
 *      In the latter case, the &sk_buff is not changed.
 *
 *      All the pointers pointing into the skb header may change and must be
 *      reloaded after a call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask)
{
        int i;
        u8 *data;
        int size = nhead + skb_end_offset(skb) + ntail;
        long off;

        BUG_ON(nhead < 0);

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        if (skb_pfmemalloc(skb))
                gfp_mask |= __GFP_MEMALLOC;
        data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
                               gfp_mask, NUMA_NO_NODE, NULL);
        if (!data)
                goto nodata;
        size = SKB_WITH_OVERHEAD(ksize(data));

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void.
         */
        memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

        memcpy((struct skb_shared_info *)(data + size),
               skb_shinfo(skb),
               offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

        /*
         * If shinfo is shared we must drop the old head gracefully, but if
         * it is not we can just drop the old head and let the existing
         * refcount be, since all we did is relocate the values.
         */
        if (skb_cloned(skb)) {
                /* copy this zero copy skb frags */
                if (skb_orphan_frags(skb, gfp_mask))
                        goto nofrags;
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                        skb_frag_ref(skb, i);

                if (skb_has_frag_list(skb))
                        skb_clone_fraglist(skb);

                skb_release_data(skb);
        } else {
                skb_free_head(skb);
        }
        off = (data + nhead) - skb->head;

        skb->head     = data;
        skb->head_frag = 0;
        skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->end      = size;
        off           = nhead;
#else
        skb->end      = skb->head + size;
#endif
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->tail             += off;
        skb->transport_header += off;
        skb->network_header   += off;
        if (skb_mac_header_was_set(skb))
                skb->mac_header += off;
        /* Only adjust this if it actually is csum_start rather than csum */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;

nofrags:
        kfree(data);
nodata:
        return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
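
/*
 * Illustrative sketch, not part of the original file: guaranteeing
 * headroom before pushing an encapsulation header. The caller must hold
 * the only reference (pskb_expand_head() BUGs on shared skbs), and all
 * pointers into the old header are stale after a successful call. The
 * 8-byte header and the function name are hypothetical; skb_cow_head()
 * in <linux/skbuff.h> wraps this same pattern.
 */
static int example_push_encap(struct sk_buff *skb)
{
        const unsigned int hlen = 8;

        if (skb_headroom(skb) < hlen || skb_cloned(skb)) {
                int err = pskb_expand_head(skb, SKB_DATA_ALIGN(hlen), 0,
                                           GFP_ATOMIC);
                if (err)
                        return err;
        }
        memset(skb_push(skb, hlen), 0, hlen);
        return 0;
}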

/* Make a private copy of the skb with a writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
                                             GFP_ATOMIC)) {
                        kfree_skb(skb2);
                        skb2 = NULL;
                }
        }
        return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *      skb_copy_expand -       copy and expand sk_buff
 *      @skb: buffer to copy
 *      @newheadroom: new free bytes at head
 *      @newtailroom: new free bytes at tail
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data and while doing so
 *      allocate additional space.
 *
 *      This is used when the caller wishes to modify the data and needs a
 *      private copy of the data to alter as well as more space for new fields.
 *      Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      You must pass %GFP_ATOMIC as the allocation priority if this function
 *      is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
                                gfp_t gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
                                        gfp_mask, skb_alloc_rx_flag(skb),
                                        NUMA_NO_NODE);
        int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
        int off;

        if (!n)
                return NULL;

        skb_reserve(n, newheadroom);

        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        head_copy_len = oldheadroom;
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
        else
                head_copy_off = newheadroom - head_copy_len;

        /* Copy the linear header and data. */
        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                          skb->len + head_copy_len))
                BUG();

        copy_skb_header(n, skb);

        off                  = newheadroom - oldheadroom;
        if (n->ip_summed == CHECKSUM_PARTIAL)
                n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        n->transport_header += off;
        n->network_header   += off;
        if (skb_mac_header_was_set(skb))
                n->mac_header += off;
#endif

        return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *      skb_pad                 -       zero pad the tail of an skb
 *      @skb: buffer to pad
 *      @pad: space to pad
 *
 *      Ensure that a buffer is followed by a padding area that is zero
 *      filled. Used by network drivers which may DMA or transfer data
 *      beyond the buffer end onto the wire.
 *
 *      May return an error in out of memory cases. The skb is freed on
 *      error.
 */
int skb_pad(struct sk_buff *skb, int pad)
{
        int err;
        int ntail;

        /* If the skbuff is non-linear, tailroom is always zero. */
        if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
                memset(skb->data + skb->len, 0, pad);
                return 0;
        }

        ntail = skb->data_len + pad - (skb->end - skb->tail);
        if (likely(skb_cloned(skb) || ntail > 0)) {
                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
                if (unlikely(err))
                        goto free_skb;
        }

        /* FIXME: The use of this function with non-linear skb's really needs
         * to be audited.
         */
        err = skb_linearize(skb);
        if (unlikely(err))
                goto free_skb;

        memset(skb->data + skb->len, 0, pad);
        return 0;

free_skb:
        kfree_skb(skb);
        return err;
}
EXPORT_SYMBOL(skb_pad);
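
/*
 * Illustrative sketch, not part of the original file: padding a short
 * Ethernet frame to the 60-byte minimum (ETH_ZLEN) before handing it to
 * hardware that does not pad by itself. Note that skb_pad() zeroes the
 * tail area but leaves skb->len unchanged, so the length is extended
 * explicitly afterwards. The function name is hypothetical.
 */
static int example_pad_min_eth(struct sk_buff *skb)
{
        unsigned int pad;
        int err;

        if (skb->len >= ETH_ZLEN)
                return 0;

        pad = ETH_ZLEN - skb->len;
        err = skb_pad(skb, pad);
        if (err)
                return err;             /* skb has already been freed */
        __skb_put(skb, pad);            /* cover the zeroed pad bytes */
        return 0;
}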

/**
 *      skb_put - add data to a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer. If this would
 *      exceed the total buffer size the kernel will panic. A pointer to the
 *      first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
        if (unlikely(skb->tail > skb->end))
                skb_over_panic(skb, len, __builtin_return_address(0));
        return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *      skb_push - add data to the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer at the buffer
 *      start. If this would exceed the total buffer headroom the kernel will
 *      panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data -= len;
        skb->len  += len;
        if (unlikely(skb->data < skb->head))
                skb_under_panic(skb, len, __builtin_return_address(0));
        return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *      skb_pull - remove data from the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to remove
 *
 *      This function removes data from the start of a buffer, returning
 *      the memory to the headroom. A pointer to the next data in the buffer
 *      is returned. Once the data has been pulled, future pushes will
 *      overwrite the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
        return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *      skb_trim - remove end from a buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      Cut the length of a buffer down by removing data from the tail. If
 *      the buffer is already under the length specified it is not modified.
 *      The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->len > len)
                __skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
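
/*
 * Illustrative sketch, not part of the original file: the four pointer
 * operations above working together on a linear skb that was allocated
 * with enough headroom and tailroom. Senders add payload with skb_put()
 * and prepend headers with skb_push(); receivers peel headers off with
 * skb_pull() and drop trailing bytes with skb_trim(). The sizes are
 * hypothetical.
 */
static void example_pointer_ops(struct sk_buff *skb, unsigned int payload_len)
{
        memset(skb_put(skb, payload_len), 0, payload_len); /* append payload */
        memset(skb_push(skb, 4), 0, 4); /* prepend a 4-byte header */

        skb_pull(skb, 4);               /* receiver: strip that header */
        skb_trim(skb, payload_len - 2); /* drop two trailing bytes */
}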

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
        struct sk_buff **fragp;
        struct sk_buff *frag;
        int offset = skb_headlen(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int i;
        int err;

        if (skb_cloned(skb) &&
            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
                return err;

        i = 0;
        if (offset >= len)
                goto drop_pages;

        for (; i < nfrags; i++) {
                int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

                if (end < len) {
                        offset = end;
                        continue;
                }

                skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
                skb_shinfo(skb)->nr_frags = i;

                for (; i < nfrags; i++)
                        skb_frag_unref(skb, i);

                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);
                goto done;
        }

        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
             fragp = &frag->next) {
                int end = offset + frag->len;

                if (skb_shared(frag)) {
                        struct sk_buff *nfrag;

                        nfrag = skb_clone(frag, GFP_ATOMIC);
                        if (unlikely(!nfrag))
                                return -ENOMEM;

                        nfrag->next = frag->next;
                        consume_skb(frag);
                        frag = nfrag;
                        *fragp = frag;
                }

                if (end < len) {
                        offset = end;
                        continue;
                }

                if (end > len &&
                    unlikely((err = pskb_trim(frag, len - offset))))
                        return err;

                if (frag->next)
                        skb_drop_list(&frag->next);
                break;
        }

done:
        if (len > skb_headlen(skb)) {
                skb->data_len -= skb->len - len;
                skb->len       = len;
        } else {
                skb->len       = len;
                skb->data_len  = 0;
                skb_set_tail_pointer(skb, len);
        }

        return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *      __pskb_pull_tail - advance tail of skb header
 *      @skb: buffer to reallocate
 *      @delta: number of bytes to advance tail
 *
 *      The function makes sense only on a fragmented &sk_buff;
 *      it expands the header, moving its tail forward and copying the
 *      necessary data from the fragmented part.
 *
 *      The &sk_buff MUST have a reference count of 1.
 *
 *      Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *      or the value of the new tail of the skb in the case of success.
 *
 *      All the pointers pointing into the skb header may change and must be
 *      reloaded after a call to this function.
 */

/* Moves the tail of the skb head forward, copying data from the fragmented
 * part, when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
        /* If the skb does not have enough free space at the tail, get a new
         * one plus 128 bytes for future expansions. If we have enough
         * room at the tail, reallocate without expansion only if the skb is
         * cloned.
         */
        int i, k, eat = (skb->tail + delta) - skb->end;

        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
                                     GFP_ATOMIC))
                        return NULL;
        }

        if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
                BUG();

        /* Optimization: no fragments, no reason to preestimate
         * size of pulled pages. Superb.
         */
        if (!skb_has_frag_list(skb))
                goto pull_pages;

        /* Estimate size of pulled pages. */
        eat = delta;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

                if (size >= eat)
                        goto pull_pages;
                eat -= size;
        }

        /* If we need to update the frag list, we are in trouble.
         * Certainly, it is possible to add an offset to the skb data,
         * but taking into account that pulling is expected to
         * be a very rare operation, it is worth fighting against
         * further bloating of the skb head and crucifying ourselves here
         * instead. Pure masochism, indeed. 8)8)
         */
        if (eat) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
                struct sk_buff *clone = NULL;
                struct sk_buff *insp = NULL;

                do {
                        BUG_ON(!list);

                        if (list->len <= eat) {
                                /* Eaten as a whole. */
                                eat -= list->len;
                                list = list->next;
                                insp = list;
                        } else {
                                /* Eaten partially. */

                                if (skb_shared(list)) {
                                        /* Sucks! We need to fork the list. :-( */
                                        clone = skb_clone(list, GFP_ATOMIC);
                                        if (!clone)
                                                return NULL;
                                        insp = list->next;
                                        list = clone;
                                } else {
                                        /* This may be pulled without
                                         * problems. */
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
                                        kfree_skb(clone);
                                        return NULL;
                                }
                                break;
                        }
                } while (eat);

                /* Free pulled out fragments. */
                while ((list = skb_shinfo(skb)->frag_list) != insp) {
                        skb_shinfo(skb)->frag_list = list->next;
                        kfree_skb(list);
                }
                /* And insert the new clone at the head. */
                if (clone) {
                        clone->next = list;
                        skb_shinfo(skb)->frag_list = clone;
                }
        }
        /* Success! Now we may commit changes to the skb data. */

pull_pages:
        eat = delta;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

                if (size <= eat) {
                        skb_frag_unref(skb, i);
                        eat -= size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb->tail     += delta;
        skb->data_len -= delta;

        return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
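
/*
 * Illustrative sketch, not part of the original file: callers rarely use
 * __pskb_pull_tail() directly; the usual entry point is pskb_may_pull()
 * from <linux/skbuff.h>, which linearizes just enough of the header
 * before it is dereferenced. The 8-byte header is hypothetical.
 */
static int example_parse_header(struct sk_buff *skb)
{
        const unsigned int hlen = 8;

        if (!pskb_may_pull(skb, hlen))
                return -EINVAL;         /* header unavailable */

        /* skb->data is now valid for at least hlen linear bytes. */
        return skb->data[0] & 0x0f;
}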
1522
1523/**
1524 *      skb_copy_bits - copy bits from skb to kernel buffer
1525 *      @skb: source skb
1526 *      @offset: offset in source
1527 *      @to: destination buffer
1528 *      @len: number of bytes to copy
1529 *
1530 *      Copy the specified number of bytes from the source skb to the
1531 *      destination buffer.
1532 *
1533 *      CAUTION ! :
1534 *              If its prototype is ever changed,
1535 *              check arch/{*}/net/{*}.S files,
1536 *              since it is called from BPF assembly code.
1537 */
1538int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1539{
1540        int start = skb_headlen(skb);
1541        struct sk_buff *frag_iter;
1542        int i, copy;
1543
1544        if (offset > (int)skb->len - len)
1545                goto fault;
1546
1547        /* Copy header. */
1548        if ((copy = start - offset) > 0) {
1549                if (copy > len)
1550                        copy = len;
1551                skb_copy_from_linear_data_offset(skb, offset, to, copy);
1552                if ((len -= copy) == 0)
1553                        return 0;
1554                offset += copy;
1555                to     += copy;
1556        }
1557
1558        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1559                int end;
1560                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1561
1562                WARN_ON(start > offset + len);
1563
1564                end = start + skb_frag_size(f);
1565                if ((copy = end - offset) > 0) {
1566                        u8 *vaddr;
1567
1568                        if (copy > len)
1569                                copy = len;
1570
1571                        vaddr = kmap_atomic(skb_frag_page(f));
1572                        memcpy(to,
1573                               vaddr + f->page_offset + offset - start,
1574                               copy);
1575                        kunmap_atomic(vaddr);
1576
1577                        if ((len -= copy) == 0)
1578                                return 0;
1579                        offset += copy;
1580                        to     += copy;
1581                }
1582                start = end;
1583        }
1584
1585        skb_walk_frags(skb, frag_iter) {
1586                int end;
1587
1588                WARN_ON(start > offset + len);
1589
1590                end = start + frag_iter->len;
1591                if ((copy = end - offset) > 0) {
1592                        if (copy > len)
1593                                copy = len;
1594                        if (skb_copy_bits(frag_iter, offset - start, to, copy))
1595                                goto fault;
1596                        if ((len -= copy) == 0)
1597                                return 0;
1598                        offset += copy;
1599                        to     += copy;
1600                }
1601                start = end;
1602        }
1603
1604        if (!len)
1605                return 0;
1606
1607fault:
1608        return -EFAULT;
1609}
1610EXPORT_SYMBOL(skb_copy_bits);
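
/*
 * Usage sketch for skb_copy_bits() (illustrative only; the 20-byte
 * buffer and the -EINVAL policy are assumptions, not anything defined
 * in this file). The copy works even when the requested bytes are
 * spread over frags or the frag list, and fails with -EFAULT when the
 * skb is too short:
 *
 *      u8 hdr[20];
 *
 *      if (skb_copy_bits(skb, 0, hdr, sizeof(hdr)))
 *              return -EINVAL;
 */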
1611
1612/*
1613 * Callback from splice_to_pipe(); releases pages left in the spd
1614 * in case we errored out while filling the pipe.
1615 */
1616static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1617{
1618        put_page(spd->pages[i]);
1619}
1620
1621static struct page *linear_to_page(struct page *page, unsigned int *len,
1622                                   unsigned int *offset,
1623                                   struct sk_buff *skb, struct sock *sk)
1624{
1625        struct page_frag *pfrag = sk_page_frag(sk);
1626
1627        if (!sk_page_frag_refill(sk, pfrag))
1628                return NULL;
1629
1630        *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1631
1632        memcpy(page_address(pfrag->page) + pfrag->offset,
1633               page_address(page) + *offset, *len);
1634        *offset = pfrag->offset;
1635        pfrag->offset += *len;
1636
1637        return pfrag->page;
1638}
1639
1640static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1641                             struct page *page,
1642                             unsigned int offset)
1643{
1644        return  spd->nr_pages &&
1645                spd->pages[spd->nr_pages - 1] == page &&
1646                (spd->partial[spd->nr_pages - 1].offset +
1647                 spd->partial[spd->nr_pages - 1].len == offset);
1648}
1649
1650/*
1651 * Fill page/offset/length into spd, if it can hold more pages.
1652 */
1653static bool spd_fill_page(struct splice_pipe_desc *spd,
1654                          struct pipe_inode_info *pipe, struct page *page,
1655                          unsigned int *len, unsigned int offset,
1656                          struct sk_buff *skb, bool linear,
1657                          struct sock *sk)
1658{
1659        if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1660                return true;
1661
1662        if (linear) {
1663                page = linear_to_page(page, len, &offset, skb, sk);
1664                if (!page)
1665                        return true;
1666        }
1667        if (spd_can_coalesce(spd, page, offset)) {
1668                spd->partial[spd->nr_pages - 1].len += *len;
1669                return false;
1670        }
1671        get_page(page);
1672        spd->pages[spd->nr_pages] = page;
1673        spd->partial[spd->nr_pages].len = *len;
1674        spd->partial[spd->nr_pages].offset = offset;
1675        spd->nr_pages++;
1676
1677        return false;
1678}
1679
1680static inline void __segment_seek(struct page **page, unsigned int *poff,
1681                                  unsigned int *plen, unsigned int off)
1682{
1683        unsigned long n;
1684
1685        *poff += off;
1686        n = *poff / PAGE_SIZE;
1687        if (n)
1688                *page = nth_page(*page, n);
1689
1690        *poff = *poff % PAGE_SIZE;
1691        *plen -= off;
1692}
1693
1694static bool __splice_segment(struct page *page, unsigned int poff,
1695                             unsigned int plen, unsigned int *off,
1696                             unsigned int *len, struct sk_buff *skb,
1697                             struct splice_pipe_desc *spd, bool linear,
1698                             struct sock *sk,
1699                             struct pipe_inode_info *pipe)
1700{
1701        if (!*len)
1702                return true;
1703
1704        /* skip this segment if already processed */
1705        if (*off >= plen) {
1706                *off -= plen;
1707                return false;
1708        }
1709
1710        /* ignore any bits we already processed */
1711        if (*off) {
1712                __segment_seek(&page, &poff, &plen, *off);
1713                *off = 0;
1714        }
1715
1716        do {
1717                unsigned int flen = min(*len, plen);
1718
1719                /* the linear region may spread across several pages  */
1720                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1721
1722                if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1723                        return true;
1724
1725                __segment_seek(&page, &poff, &plen, flen);
1726                *len -= flen;
1727
1728        } while (*len && plen);
1729
1730        return false;
1731}
1732
1733/*
1734 * Map linear and fragment data from the skb to spd. It reports true if the
1735 * pipe is full or if we already spliced the requested length.
1736 */
1737static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1738                              unsigned int *offset, unsigned int *len,
1739                              struct splice_pipe_desc *spd, struct sock *sk)
1740{
1741        int seg;
1742
1743        /* map the linear part:
1744         * If skb->head_frag is set, this 'linear' part is backed by a
1745         * fragment, and if the head is not shared with any clones then
1746         * we can avoid a copy since we own the head portion of this page.
1747         */
1748        if (__splice_segment(virt_to_page(skb->data),
1749                             (unsigned long) skb->data & (PAGE_SIZE - 1),
1750                             skb_headlen(skb),
1751                             offset, len, skb, spd,
1752                             skb_head_is_locked(skb),
1753                             sk, pipe))
1754                return true;
1755
1756        /*
1757         * then map the fragments
1758         */
1759        for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1760                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1761
1762                if (__splice_segment(skb_frag_page(f),
1763                                     f->page_offset, skb_frag_size(f),
1764                                     offset, len, skb, spd, false, sk, pipe))
1765                        return true;
1766        }
1767
1768        return false;
1769}
1770
1771/*
1772 * Map data from the skb to a pipe. Should handle both the linear part,
1773 * the fragments, and the frag list. It does NOT handle frag lists within
1774 * the frag list, if such a thing exists. We'd probably need to recurse to
1775 * handle that cleanly.
1776 */
1777int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1778                    struct pipe_inode_info *pipe, unsigned int tlen,
1779                    unsigned int flags)
1780{
1781        struct partial_page partial[MAX_SKB_FRAGS];
1782        struct page *pages[MAX_SKB_FRAGS];
1783        struct splice_pipe_desc spd = {
1784                .pages = pages,
1785                .partial = partial,
1786                .nr_pages_max = MAX_SKB_FRAGS,
1787                .flags = flags,
1788                .ops = &sock_pipe_buf_ops,
1789                .spd_release = sock_spd_release,
1790        };
1791        struct sk_buff *frag_iter;
1792        struct sock *sk = skb->sk;
1793        int ret = 0;
1794
1795        /*
1796         * __skb_splice_bits() only fails if the output has no room left,
1797         * so no point in going over the frag_list for the error case.
1798         */
1799        if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1800                goto done;
1801        else if (!tlen)
1802                goto done;
1803
1804        /*
1805         * now see if we have a frag_list to map
1806         */
1807        skb_walk_frags(skb, frag_iter) {
1808                if (!tlen)
1809                        break;
1810                if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1811                        break;
1812        }
1813
1814done:
1815        if (spd.nr_pages) {
1816                /*
1817                 * Drop the socket lock, otherwise we have reverse
1818                 * locking dependencies between sk_lock and i_mutex
1819                 * here as compared to sendfile(). We enter here
1820                 * with the socket lock held, and splice_to_pipe() will
1821                 * grab the pipe inode lock. For sendfile() emulation,
1822                 * we call into ->sendpage() with the i_mutex lock held
1823                 * and networking will grab the socket lock.
1824                 */
1825                release_sock(sk);
1826                ret = splice_to_pipe(pipe, &spd);
1827                lock_sock(sk);
1828        }
1829
1830        return ret;
1831}
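
/*
 * Usage sketch for skb_splice_bits() (illustrative only, loosely
 * modelled on the TCP splice-read actor; example_splice_state, with
 * its pipe and flags members, is a hypothetical cookie carried in the
 * read descriptor, not anything defined in this file):
 *
 *      static int example_splice_actor(read_descriptor_t *rd_desc,
 *                                      struct sk_buff *skb,
 *                                      unsigned int offset, size_t len)
 *      {
 *              struct example_splice_state *tss = rd_desc->arg.data;
 *
 *              return skb_splice_bits(skb, offset, tss->pipe,
 *                                     min(rd_desc->count, len),
 *                                     tss->flags);
 *      }
 */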
1832
1833/**
1834 *      skb_store_bits - store bits from kernel buffer to skb
1835 *      @skb: destination buffer
1836 *      @offset: offset in destination
1837 *      @from: source buffer
1838 *      @len: number of bytes to copy
1839 *
1840 *      Copy the specified number of bytes from the source buffer to the
1841 *      destination skb.  This function handles all the messy bits of
1842 *      traversing fragment lists and such.
1843 */
1844
1845int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1846{
1847        int start = skb_headlen(skb);
1848        struct sk_buff *frag_iter;
1849        int i, copy;
1850
1851        if (offset > (int)skb->len - len)
1852                goto fault;
1853
1854        if ((copy = start - offset) > 0) {
1855                if (copy > len)
1856                        copy = len;
1857                skb_copy_to_linear_data_offset(skb, offset, from, copy);
1858                if ((len -= copy) == 0)
1859                        return 0;
1860                offset += copy;
1861                from += copy;
1862        }
1863
1864        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1865                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1866                int end;
1867
1868                WARN_ON(start > offset + len);
1869
1870                end = start + skb_frag_size(frag);
1871                if ((copy = end - offset) > 0) {
1872                        u8 *vaddr;
1873
1874                        if (copy > len)
1875                                copy = len;
1876
1877                        vaddr = kmap_atomic(skb_frag_page(frag));
1878                        memcpy(vaddr + frag->page_offset + offset - start,
1879                               from, copy);
1880                        kunmap_atomic(vaddr);
1881
1882                        if ((len -= copy) == 0)
1883                                return 0;
1884                        offset += copy;
1885                        from += copy;
1886                }
1887                start = end;
1888        }
1889
1890        skb_walk_frags(skb, frag_iter) {
1891                int end;
1892
1893                WARN_ON(start > offset + len);
1894
1895                end = start + frag_iter->len;
1896                if ((copy = end - offset) > 0) {
1897                        if (copy > len)
1898                                copy = len;
1899                        if (skb_store_bits(frag_iter, offset - start,
1900                                           from, copy))
1901                                goto fault;
1902                        if ((len -= copy) == 0)
1903                                return 0;
1904                        offset += copy;
1905                        from += copy;
1906                }
1907                start = end;
1908        }
1909        if (!len)
1910                return 0;
1911
1912fault:
1913        return -EFAULT;
1914}
1915EXPORT_SYMBOL(skb_store_bits);
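
/*
 * Usage sketch for skb_store_bits() (illustrative only; 'offset' and
 * the stored value are assumptions). The caller must own the data, so
 * a cloned skb is unshared first:
 *
 *      __be32 val = htonl(0x01020304);
 *
 *      if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 *              return -ENOMEM;
 *      if (skb_store_bits(skb, offset, &val, sizeof(val)))
 *              return -EFAULT;
 */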
1916
1917/* Checksum skb data. */
1918
1919__wsum skb_checksum(const struct sk_buff *skb, int offset,
1920                          int len, __wsum csum)
1921{
1922        int start = skb_headlen(skb);
1923        int i, copy = start - offset;
1924        struct sk_buff *frag_iter;
1925        int pos = 0;
1926
1927        /* Checksum header. */
1928        if (copy > 0) {
1929                if (copy > len)
1930                        copy = len;
1931                csum = csum_partial(skb->data + offset, copy, csum);
1932                if ((len -= copy) == 0)
1933                        return csum;
1934                offset += copy;
1935                pos     = copy;
1936        }
1937
1938        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1939                int end;
1940                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1941
1942                WARN_ON(start > offset + len);
1943
1944                end = start + skb_frag_size(frag);
1945                if ((copy = end - offset) > 0) {
1946                        __wsum csum2;
1947                        u8 *vaddr;
1948
1949                        if (copy > len)
1950                                copy = len;
1951                        vaddr = kmap_atomic(skb_frag_page(frag));
1952                        csum2 = csum_partial(vaddr + frag->page_offset +
1953                                             offset - start, copy, 0);
1954                        kunmap_atomic(vaddr);
1955                        csum = csum_block_add(csum, csum2, pos);
1956                        if (!(len -= copy))
1957                                return csum;
1958                        offset += copy;
1959                        pos    += copy;
1960                }
1961                start = end;
1962        }
1963
1964        skb_walk_frags(skb, frag_iter) {
1965                int end;
1966
1967                WARN_ON(start > offset + len);
1968
1969                end = start + frag_iter->len;
1970                if ((copy = end - offset) > 0) {
1971                        __wsum csum2;
1972                        if (copy > len)
1973                                copy = len;
1974                        csum2 = skb_checksum(frag_iter, offset - start,
1975                                             copy, 0);
1976                        csum = csum_block_add(csum, csum2, pos);
1977                        if ((len -= copy) == 0)
1978                                return csum;
1979                        offset += copy;
1980                        pos    += copy;
1981                }
1982                start = end;
1983        }
1984        BUG_ON(len);
1985
1986        return csum;
1987}
1988EXPORT_SYMBOL(skb_checksum);
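
/*
 * Usage sketch for skb_checksum() (illustrative only): checksum the
 * whole packet in one call, as a receive path might when no hardware
 * checksum is available. For a protocol whose checksum field is
 * included in the summed region, a folded result of zero means the
 * checksum verified:
 *
 *      __sum16 folded = csum_fold(skb_checksum(skb, 0, skb->len, 0));
 */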
1989
1990/* Both of the above in one bottle. */
1991
1992__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1993                                    u8 *to, int len, __wsum csum)
1994{
1995        int start = skb_headlen(skb);
1996        int i, copy = start - offset;
1997        struct sk_buff *frag_iter;
1998        int pos = 0;
1999
2000        /* Copy header. */
2001        if (copy > 0) {
2002                if (copy > len)
2003                        copy = len;
2004                csum = csum_partial_copy_nocheck(skb->data + offset, to,
2005                                                 copy, csum);
2006                if ((len -= copy) == 0)
2007                        return csum;
2008                offset += copy;
2009                to     += copy;
2010                pos     = copy;
2011        }
2012
2013        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2014                int end;
2015
2016                WARN_ON(start > offset + len);
2017
2018                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2019                if ((copy = end - offset) > 0) {
2020                        __wsum csum2;
2021                        u8 *vaddr;
2022                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2023
2024                        if (copy > len)
2025                                copy = len;
2026                        vaddr = kmap_atomic(skb_frag_page(frag));
2027                        csum2 = csum_partial_copy_nocheck(vaddr +
2028                                                          frag->page_offset +
2029                                                          offset - start, to,
2030                                                          copy, 0);
2031                        kunmap_atomic(vaddr);
2032                        csum = csum_block_add(csum, csum2, pos);
2033                        if (!(len -= copy))
2034                                return csum;
2035                        offset += copy;
2036                        to     += copy;
2037                        pos    += copy;
2038                }
2039                start = end;
2040        }
2041
2042        skb_walk_frags(skb, frag_iter) {
2043                __wsum csum2;
2044                int end;
2045
2046                WARN_ON(start > offset + len);
2047
2048                end = start + frag_iter->len;
2049                if ((copy = end - offset) > 0) {
2050                        if (copy > len)
2051                                copy = len;
2052                        csum2 = skb_copy_and_csum_bits(frag_iter,
2053                                                       offset - start,
2054                                                       to, copy, 0);
2055                        csum = csum_block_add(csum, csum2, pos);
2056                        if ((len -= copy) == 0)
2057                                return csum;
2058                        offset += copy;
2059                        to     += copy;
2060                        pos    += copy;
2061                }
2062                start = end;
2063        }
2064        BUG_ON(len);
2065        return csum;
2066}
2067EXPORT_SYMBOL(skb_copy_and_csum_bits);
2068
2069void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2070{
2071        __wsum csum;
2072        long csstart;
2073
2074        if (skb->ip_summed == CHECKSUM_PARTIAL)
2075                csstart = skb_checksum_start_offset(skb);
2076        else
2077                csstart = skb_headlen(skb);
2078
2079        BUG_ON(csstart > skb_headlen(skb));
2080
2081        skb_copy_from_linear_data(skb, to, csstart);
2082
2083        csum = 0;
2084        if (csstart != skb->len)
2085                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2086                                              skb->len - csstart, 0);
2087
2088        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2089                long csstuff = csstart + skb->csum_offset;
2090
2091                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2092        }
2093}
2094EXPORT_SYMBOL(skb_copy_and_csum_dev);
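
/*
 * Usage sketch for skb_copy_and_csum_dev() (illustrative only): a
 * driver without transmit checksum offload copies the packet into a
 * bounce buffer and lets the helper fill in the checksum; 'txbuf' is a
 * hypothetical buffer of at least skb->len bytes and example_dev_write()
 * a hypothetical I/O routine:
 *
 *      skb_copy_and_csum_dev(skb, txbuf);
 *      example_dev_write(dev, txbuf, skb->len);
 */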
2095
2096/**
2097 *      skb_dequeue - remove from the head of the queue
2098 *      @list: list to dequeue from
2099 *
2100 *      Remove the head of the list. The list lock is taken so the function
2101 *      may be used safely with other locking list functions. The head item is
2102 *      returned or %NULL if the list is empty.
2103 */
2104
2105struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2106{
2107        unsigned long flags;
2108        struct sk_buff *result;
2109
2110        spin_lock_irqsave(&list->lock, flags);
2111        result = __skb_dequeue(list);
2112        spin_unlock_irqrestore(&list->lock, flags);
2113        return result;
2114}
2115EXPORT_SYMBOL(skb_dequeue);
2116
2117/**
2118 *      skb_dequeue_tail - remove from the tail of the queue
2119 *      @list: list to dequeue from
2120 *
2121 *      Remove the tail of the list. The list lock is taken so the function
2122 *      may be used safely with other locking list functions. The tail item is
2123 *      returned or %NULL if the list is empty.
2124 */
2125struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2126{
2127        unsigned long flags;
2128        struct sk_buff *result;
2129
2130        spin_lock_irqsave(&list->lock, flags);
2131        result = __skb_dequeue_tail(list);
2132        spin_unlock_irqrestore(&list->lock, flags);
2133        return result;
2134}
2135EXPORT_SYMBOL(skb_dequeue_tail);
2136
2137/**
2138 *      skb_queue_purge - empty a list
2139 *      @list: list to empty
2140 *
2141 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
2142 *      the list and one reference dropped. This function takes the list
2143 *      lock and is atomic with respect to other list locking functions.
2144 */
2145void skb_queue_purge(struct sk_buff_head *list)
2146{
2147        struct sk_buff *skb;
2148        while ((skb = skb_dequeue(list)) != NULL)
2149                kfree_skb(skb);
2150}
2151EXPORT_SYMBOL(skb_queue_purge);
2152
2153/**
2154 *      skb_queue_head - queue a buffer at the list head
2155 *      @list: list to use
2156 *      @newsk: buffer to queue
2157 *
2158 *      Queue a buffer at the start of the list. This function takes the
2159 *      list lock and can be used safely with other locking &sk_buff
2160 *      functions.
2161 *
2162 *      A buffer cannot be placed on two lists at the same time.
2163 */
2164void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2165{
2166        unsigned long flags;
2167
2168        spin_lock_irqsave(&list->lock, flags);
2169        __skb_queue_head(list, newsk);
2170        spin_unlock_irqrestore(&list->lock, flags);
2171}
2172EXPORT_SYMBOL(skb_queue_head);
2173
2174/**
2175 *      skb_queue_tail - queue a buffer at the list tail
2176 *      @list: list to use
2177 *      @newsk: buffer to queue
2178 *
2179 *      Queue a buffer at the tail of the list. This function takes the
2180 *      list lock and can be used safely with other locking &sk_buff
2181 *      functions.
2182 *
2183 *      A buffer cannot be placed on two lists at the same time.
2184 */
2185void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2186{
2187        unsigned long flags;
2188
2189        spin_lock_irqsave(&list->lock, flags);
2190        __skb_queue_tail(list, newsk);
2191        spin_unlock_irqrestore(&list->lock, flags);
2192}
2193EXPORT_SYMBOL(skb_queue_tail);
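
/*
 * Usage sketch for the locked queue helpers (illustrative only; the
 * queue and the consumer are hypothetical). Because the helpers take
 * the list lock with interrupts disabled, producer and consumer may
 * run in different contexts:
 *
 *      static struct sk_buff_head example_rxq;
 *
 *      skb_queue_head_init(&example_rxq);
 *
 *      skb_queue_tail(&example_rxq, skb);      [producer]
 *
 *      while ((skb = skb_dequeue(&example_rxq)) != NULL)
 *              example_process(skb);           [consumer]
 */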
2194
2195/**
2196 *      skb_unlink      -       remove a buffer from a list
2197 *      @skb: buffer to remove
2198 *      @list: list to use
2199 *
2200 *      Remove a packet from a list. The list locks are taken and this
2201 *      function is atomic with respect to other list locked calls.
2202 *
2203 *      You must know what list the SKB is on.
2204 */
2205void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2206{
2207        unsigned long flags;
2208
2209        spin_lock_irqsave(&list->lock, flags);
2210        __skb_unlink(skb, list);
2211        spin_unlock_irqrestore(&list->lock, flags);
2212}
2213EXPORT_SYMBOL(skb_unlink);
2214
2215/**
2216 *      skb_append      -       append a buffer
2217 *      @old: buffer to insert after
2218 *      @newsk: buffer to insert
2219 *      @list: list to use
2220 *
2221 *      Place a packet after a given packet in a list. The list locks are taken
2222 *      and this function is atomic with respect to other list locked calls.
2223 *      A buffer cannot be placed on two lists at the same time.
2224 */
2225void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2226{
2227        unsigned long flags;
2228
2229        spin_lock_irqsave(&list->lock, flags);
2230        __skb_queue_after(list, old, newsk);
2231        spin_unlock_irqrestore(&list->lock, flags);
2232}
2233EXPORT_SYMBOL(skb_append);
2234
2235/**
2236 *      skb_insert      -       insert a buffer
2237 *      @old: buffer to insert before
2238 *      @newsk: buffer to insert
2239 *      @list: list to use
2240 *
2241 *      Place a packet before a given packet in a list. The list locks are
2242 *      taken and this function is atomic with respect to other list locked
2243 *      calls.
2244 *
2245 *      A buffer cannot be placed on two lists at the same time.
2246 */
2247void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2248{
2249        unsigned long flags;
2250
2251        spin_lock_irqsave(&list->lock, flags);
2252        __skb_insert(newsk, old->prev, old, list);
2253        spin_unlock_irqrestore(&list->lock, flags);
2254}
2255EXPORT_SYMBOL(skb_insert);
2256
2257static inline void skb_split_inside_header(struct sk_buff *skb,
2258                                           struct sk_buff* skb1,
2259                                           const u32 len, const int pos)
2260{
2261        int i;
2262
2263        skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2264                                         pos - len);
2265        /* And move data appendix as is. */
2266        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2267                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2268
2269        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2270        skb_shinfo(skb)->nr_frags  = 0;
2271        skb1->data_len             = skb->data_len;
2272        skb1->len                  += skb1->data_len;
2273        skb->data_len              = 0;
2274        skb->len                   = len;
2275        skb_set_tail_pointer(skb, len);
2276}
2277
2278static inline void skb_split_no_header(struct sk_buff *skb,
2279                                       struct sk_buff* skb1,
2280                                       const u32 len, int pos)
2281{
2282        int i, k = 0;
2283        const int nfrags = skb_shinfo(skb)->nr_frags;
2284
2285        skb_shinfo(skb)->nr_frags = 0;
2286        skb1->len                 = skb1->data_len = skb->len - len;
2287        skb->len                  = len;
2288        skb->data_len             = len - pos;
2289
2290        for (i = 0; i < nfrags; i++) {
2291                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2292
2293                if (pos + size > len) {
2294                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2295
2296                        if (pos < len) {
2297                                /* Split frag.
2298                                 * We have two variants in this case:
2299                                 * 1. Move the whole frag to the second
2300                                 *    part, if possible, e.g. this
2301                                 *    approach is mandatory for TUX,
2302                                 *    where splitting is expensive.
2303                                 * 2. Split accurately. This is what we do.
2304                                 */
2305                                skb_frag_ref(skb, i);
2306                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2307                                skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2308                                skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2309                                skb_shinfo(skb)->nr_frags++;
2310                        }
2311                        k++;
2312                } else
2313                        skb_shinfo(skb)->nr_frags++;
2314                pos += size;
2315        }
2316        skb_shinfo(skb1)->nr_frags = k;
2317}
2318
2319/**
2320 * skb_split - Split fragmented skb to two parts at length len.
2321 * @skb: the buffer to split
2322 * @skb1: the buffer to receive the second part
2323 * @len: new length for skb
2324 */
2325void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2326{
2327        int pos = skb_headlen(skb);
2328
2329        if (len < pos)  /* Split line is inside header. */
2330                skb_split_inside_header(skb, skb1, len, pos);
2331        else            /* Second chunk has no header, nothing to copy. */
2332                skb_split_no_header(skb, skb1, len, pos);
2333}
2334EXPORT_SYMBOL(skb_split);
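
/*
 * Usage sketch for skb_split() (illustrative only, loosely following
 * the TCP fragmenting pattern; the allocation size is an assumption).
 * The second skb must have room for any linear bytes that fall past
 * the split point, hence the skb_headlen() sized allocation:
 *
 *      struct sk_buff *buff;
 *
 *      buff = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *      if (!buff)
 *              return -ENOMEM;
 *      skb_split(skb, buff, len);
 *
 * Afterwards skb covers bytes [0, len) and buff covers the remainder.
 */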
2335
2336/* Shifting from/to a cloned skb is a no-go.
2337 *
2338 * Caller cannot keep skb_shinfo related pointers past calling here!
2339 */
2340static int skb_prepare_for_shift(struct sk_buff *skb)
2341{
2342        return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2343}
2344
2345/**
2346 * skb_shift - Shifts paged data partially from skb to another
2347 * @tgt: buffer into which tail data gets added
2348 * @skb: buffer from which the paged data comes from
2349 * @shiftlen: shift up to this many bytes
2350 *
2351 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2352 * the length of the skb, from skb to tgt. Returns the number of bytes
2353 * shifted. It is up to the caller to free skb if everything was shifted.
2354 *
2355 * If @tgt runs out of frags, the whole operation is aborted.
2356 *
2357 * The skb must contain nothing but paged data, while tgt is allowed
2358 * to have non-paged data as well.
2359 *
2360 * TODO: full sized shift could be optimized but that would need
2361 * a specialized skb freeing routine for frags without up-to-date nr_frags.
2362 */
2363int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2364{
2365        int from, to, merge, todo;
2366        struct skb_frag_struct *fragfrom, *fragto;
2367
2368        BUG_ON(shiftlen > skb->len);
2369        BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
2370
2371        todo = shiftlen;
2372        from = 0;
2373        to = skb_shinfo(tgt)->nr_frags;
2374        fragfrom = &skb_shinfo(skb)->frags[from];
2375
2376        /* Actual merge is delayed until the point when we know we can
2377         * commit all, so that we don't have to undo partial changes
2378         */
2379        if (!to ||
2380            !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2381                              fragfrom->page_offset)) {
2382                merge = -1;
2383        } else {
2384                merge = to - 1;
2385
2386                todo -= skb_frag_size(fragfrom);
2387                if (todo < 0) {
2388                        if (skb_prepare_for_shift(skb) ||
2389                            skb_prepare_for_shift(tgt))
2390                                return 0;
2391
2392                        /* All previous frag pointers might be stale! */
2393                        fragfrom = &skb_shinfo(skb)->frags[from];
2394                        fragto = &skb_shinfo(tgt)->frags[merge];
2395
2396                        skb_frag_size_add(fragto, shiftlen);
2397                        skb_frag_size_sub(fragfrom, shiftlen);
2398                        fragfrom->page_offset += shiftlen;
2399
2400                        goto onlymerged;
2401                }
2402
2403                from++;
2404        }
2405
2406        /* Skip full, not-fitting skb to avoid expensive operations */
2407        if ((shiftlen == skb->len) &&
2408            (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2409                return 0;
2410
2411        if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2412                return 0;
2413
2414        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2415                if (to == MAX_SKB_FRAGS)
2416                        return 0;
2417
2418                fragfrom = &skb_shinfo(skb)->frags[from];
2419                fragto = &skb_shinfo(tgt)->frags[to];
2420
2421                if (todo >= skb_frag_size(fragfrom)) {
2422                        *fragto = *fragfrom;
2423                        todo -= skb_frag_size(fragfrom);
2424                        from++;
2425                        to++;
2426
2427                } else {
2428                        __skb_frag_ref(fragfrom);
2429                        fragto->page = fragfrom->page;
2430                        fragto->page_offset = fragfrom->page_offset;
2431                        skb_frag_size_set(fragto, todo);
2432
2433                        fragfrom->page_offset += todo;
2434                        skb_frag_size_sub(fragfrom, todo);
2435                        todo = 0;
2436
2437                        to++;
2438                        break;
2439                }
2440        }
2441
2442        /* Ready to "commit" this state change to tgt */
2443        skb_shinfo(tgt)->nr_frags = to;
2444
2445        if (merge >= 0) {
2446                fragfrom = &skb_shinfo(skb)->frags[0];
2447                fragto = &skb_shinfo(tgt)->frags[merge];
2448
2449                skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2450                __skb_frag_unref(fragfrom);
2451        }
2452
2453        /* Reposition in the original skb */
2454        to = 0;
2455        while (from < skb_shinfo(skb)->nr_frags)
2456                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2457        skb_shinfo(skb)->nr_frags = to;
2458
2459        BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2460
2461onlymerged:
2462        /* Most likely the tgt won't ever need its checksum anymore; the skb,
2463         * on the other hand, might need it if it has to be resent.
2464         */
2465        tgt->ip_summed = CHECKSUM_PARTIAL;
2466        skb->ip_summed = CHECKSUM_PARTIAL;
2467
2468        /* Yuck, is it really working this way? Some helper, please? */
2469        skb->len -= shiftlen;
2470        skb->data_len -= shiftlen;
2471        skb->truesize -= shiftlen;
2472        tgt->len += shiftlen;
2473        tgt->data_len += shiftlen;
2474        tgt->truesize += shiftlen;
2475
2476        return shiftlen;
2477}
2478
2479/**
2480 * skb_prepare_seq_read - Prepare a sequential read of skb data
2481 * @skb: the buffer to read
2482 * @from: lower offset of data to be read
2483 * @to: upper offset of data to be read
2484 * @st: state variable
2485 *
2486 * Initializes the specified state variable. Must be called before
2487 * invoking skb_seq_read() for the first time.
2488 */
2489void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2490                          unsigned int to, struct skb_seq_state *st)
2491{
2492        st->lower_offset = from;
2493        st->upper_offset = to;
2494        st->root_skb = st->cur_skb = skb;
2495        st->frag_idx = st->stepped_offset = 0;
2496        st->frag_data = NULL;
2497}
2498EXPORT_SYMBOL(skb_prepare_seq_read);
2499
2500/**
2501 * skb_seq_read - Sequentially read skb data
2502 * @consumed: number of bytes consumed by the caller so far
2503 * @data: destination pointer for data to be returned
2504 * @st: state variable
2505 *
2506 * Reads a block of skb data at &consumed relative to the
2507 * lower offset specified to skb_prepare_seq_read(). Assigns
2508 * the head of the data block to &data and returns the length
2509 * of the block or 0 if the end of the skb data or the upper
2510 * offset has been reached.
2511 *
2512 * The caller is not required to consume all of the data
2513 * returned, i.e. &consumed is typically set to the number
2514 * of bytes already consumed and the next call to
2515 * skb_seq_read() will return the remaining part of the block.
2516 *
2517 * Note 1: The size of each block of data returned can be arbitrary;
2518 *       this limitation is the cost of zerocopy sequential
2519 *       reads of potentially non-linear data.
2520 *
2521 * Note 2: Fragment lists within fragments are not implemented
2522 *       at the moment, state->root_skb could be replaced with
2523 *       a stack for this purpose.
2524 */
2525unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2526                          struct skb_seq_state *st)
2527{
2528        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2529        skb_frag_t *frag;
2530
2531        if (unlikely(abs_offset >= st->upper_offset))
2532                return 0;
2533
2534next_skb:
2535        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2536
2537        if (abs_offset < block_limit && !st->frag_data) {
2538                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2539                return block_limit - abs_offset;
2540        }
2541
2542        if (st->frag_idx == 0 && !st->frag_data)
2543                st->stepped_offset += skb_headlen(st->cur_skb);
2544
2545        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2546                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2547                block_limit = skb_frag_size(frag) + st->stepped_offset;
2548
2549                if (abs_offset < block_limit) {
2550                        if (!st->frag_data)
2551                                st->frag_data = kmap_atomic(skb_frag_page(frag));
2552
2553                        *data = (u8 *) st->frag_data + frag->page_offset +
2554                                (abs_offset - st->stepped_offset);
2555
2556                        return block_limit - abs_offset;
2557                }
2558
2559                if (st->frag_data) {
2560                        kunmap_atomic(st->frag_data);
2561                        st->frag_data = NULL;
2562                }
2563
2564                st->frag_idx++;
2565                st->stepped_offset += skb_frag_size(frag);
2566        }
2567
2568        if (st->frag_data) {
2569                kunmap_atomic(st->frag_data);
2570                st->frag_data = NULL;
2571        }
2572
2573        if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2574                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2575                st->frag_idx = 0;
2576                goto next_skb;
2577        } else if (st->cur_skb->next) {
2578                st->cur_skb = st->cur_skb->next;
2579                st->frag_idx = 0;
2580                goto next_skb;
2581        }
2582
2583        return 0;
2584}
2585EXPORT_SYMBOL(skb_seq_read);
2586
2587/**
2588 * skb_abort_seq_read - Abort a sequential read of skb data
2589 * @st: state variable
2590 *
2591 * Must be called if the sequential read was abandoned before
2592 * skb_seq_read() returned 0, so that any mapped fragment is released.
2593 */
2594void skb_abort_seq_read(struct skb_seq_state *st)
2595{
2596        if (st->frag_data)
2597                kunmap_atomic(st->frag_data);
2598}
2599EXPORT_SYMBOL(skb_abort_seq_read);
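
/*
 * Usage sketch for the sequential read API (illustrative only; the
 * example_scan() predicate is hypothetical). skb_abort_seq_read() is
 * safe to call unconditionally and required whenever the loop stops
 * before skb_seq_read() has returned 0:
 *
 *      struct skb_seq_state st;
 *      unsigned int consumed = 0, len;
 *      const u8 *data;
 *
 *      skb_prepare_seq_read(skb, 0, skb->len, &st);
 *      while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *              if (example_scan(data, len))
 *                      break;
 *              consumed += len;
 *      }
 *      skb_abort_seq_read(&st);
 */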
2600
2601#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))
2602
2603static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2604                                          struct ts_config *conf,
2605                                          struct ts_state *state)
2606{
2607        return skb_seq_read(offset, text, TS_SKB_CB(state));
2608}
2609
2610static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2611{
2612        skb_abort_seq_read(TS_SKB_CB(state));
2613}
2614
2615/**
2616 * skb_find_text - Find a text pattern in skb data
2617 * @skb: the buffer to look in
2618 * @from: search offset
2619 * @to: search limit
2620 * @config: textsearch configuration
2621 * @state: uninitialized textsearch state variable
2622 *
2623 * Finds a pattern in the skb data according to the specified
2624 * textsearch configuration. Use textsearch_next() to retrieve
2625 * subsequent occurrences of the pattern. Returns the offset
2626 * to the first occurrence or UINT_MAX if no match was found.
2627 */
2628unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2629                           unsigned int to, struct ts_config *config,
2630                           struct ts_state *state)
2631{
2632        unsigned int ret;
2633
2634        config->get_next_block = skb_ts_get_next_block;
2635        config->finish = skb_ts_finish;
2636
2637        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2638
2639        ret = textsearch_find(config, state);
2640        return (ret <= to - from ? ret : UINT_MAX);
2641}
2642EXPORT_SYMBOL(skb_find_text);
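
/*
 * Usage sketch for skb_find_text() (illustrative only; the algorithm
 * name, pattern and search window are assumptions):
 *
 *      struct ts_config *conf;
 *      struct ts_state state;
 *      unsigned int pos;
 *
 *      conf = textsearch_prepare("kmp", "GET ", 4, GFP_KERNEL,
 *                                TS_AUTOLOAD);
 *      if (IS_ERR(conf))
 *              return PTR_ERR(conf);
 *      pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *      textsearch_destroy(conf);
 *
 * Here pos is UINT_MAX when the pattern does not occur in the skb.
 */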
2643
2644/**
2645 * skb_append_datato_frags - append user data to an skb
2646 * @sk: sock structure
2647 * @skb: skb structure to which the user data is appended
2648 * @getfrag: callback function used to fetch the user data
2649 * @from: pointer to the user message iov
2650 * @length: length of the iov message
2651 *
2652 * Description: This procedure appends the user data to the fragment part
2653 * of the skb. If any page allocation fails, it returns -ENOMEM.
2654 */
2655int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2656                        int (*getfrag)(void *from, char *to, int offset,
2657                                        int len, int odd, struct sk_buff *skb),
2658                        void *from, int length)
2659{
2660        int frg_cnt = 0;
2661        skb_frag_t *frag = NULL;
2662        struct page *page = NULL;
2663        int copy, left;
2664        int offset = 0;
2665        int ret;
2666
2667        do {
2668                /* Return error if we don't have space for new frag */
2669                frg_cnt = skb_shinfo(skb)->nr_frags;
2670                if (frg_cnt >= MAX_SKB_FRAGS)
2671                        return -EFAULT;
2672
2673                /* allocate a new page for next frag */
2674                page = alloc_pages(sk->sk_allocation, 0);
2675
2676                /* If alloc_pages() fails, just return failure; the caller
2677                 * will free the previously allocated pages via kfree_skb().
2678                 */
2679                if (page == NULL)
2680                        return -ENOMEM;
2681
2682                /* initialize the next frag */
2683                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2684                skb->truesize += PAGE_SIZE;
2685                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2686
2687                /* get the new initialized frag */
2688                frg_cnt = skb_shinfo(skb)->nr_frags;
2689                frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2690
2691                /* copy the user data to page */
2692                left = PAGE_SIZE - frag->page_offset;
2693                copy = (length > left)? left : length;
2694
2695                ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
2696                            offset, copy, 0, skb);
2697                if (ret < 0)
2698                        return -EFAULT;
2699
2700                /* copy was successful so update the size parameters */
2701                skb_frag_size_add(frag, copy);
2702                skb->len += copy;
2703                skb->data_len += copy;
2704                offset += copy;
2705                length -= copy;
2706
2707        } while (length > 0);
2708
2709        return 0;
2710}
2711EXPORT_SYMBOL(skb_append_datato_frags);
2712
2713/**
2714 *      skb_pull_rcsum - pull skb and update receive checksum
2715 *      @skb: buffer to update
2716 *      @len: length of data pulled
2717 *
2718 *      This function performs an skb_pull on the packet and updates
2719 *      the CHECKSUM_COMPLETE checksum.  It should be used on
2720 *      receive path processing instead of skb_pull unless you know
2721 *      that the checksum difference is zero (e.g., a valid IP header)
2722 *      or you are setting ip_summed to CHECKSUM_NONE.
2723 */
2724unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2725{
2726        BUG_ON(len > skb->len);
2727        skb->len -= len;
2728        BUG_ON(skb->len < skb->data_len);
2729        skb_postpull_rcsum(skb, skb->data, len);
2730        return skb->data += len;
2731}
2732EXPORT_SYMBOL_GPL(skb_pull_rcsum);
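
/*
 * Usage sketch for skb_pull_rcsum() (illustrative only): stripping a
 * hypothetical 4-byte tag on receive while keeping a CHECKSUM_COMPLETE
 * value consistent:
 *
 *      if (unlikely(!pskb_may_pull(skb, 4)))
 *              goto drop;
 *      skb_pull_rcsum(skb, 4);
 */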
2733
2734/**
2735 *      skb_segment - Perform protocol segmentation on skb.
2736 *      @skb: buffer to segment
2737 *      @features: features for the output path (see dev->features)
2738 *
2739 *      This function performs segmentation on the given skb.  It returns
2740 *      a pointer to the first in a list of new skbs for the segments.
2741 *      In case of error it returns ERR_PTR(err).
2742 */
2743struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2744{
2745        struct sk_buff *segs = NULL;
2746        struct sk_buff *tail = NULL;
2747        struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2748        unsigned int mss = skb_shinfo(skb)->gso_size;
2749        unsigned int doffset = skb->data - skb_mac_header(skb);
2750        unsigned int offset = doffset;
2751        unsigned int headroom;
2752        unsigned int len;
2753        int sg = !!(features & NETIF_F_SG);
2754        int nfrags = skb_shinfo(skb)->nr_frags;
2755        int err = -ENOMEM;
2756        int i = 0;
2757        int pos;
2758
2759        __skb_push(skb, doffset);
2760        headroom = skb_headroom(skb);
2761        pos = skb_headlen(skb);
2762
2763        do {
2764                struct sk_buff *nskb;
2765                skb_frag_t *frag;
2766                int hsize;
2767                int size;
2768
2769                len = skb->len - offset;
2770                if (len > mss)
2771                        len = mss;
2772
2773                hsize = skb_headlen(skb) - offset;
2774                if (hsize < 0)
2775                        hsize = 0;
2776                if (hsize > len || !sg)
2777                        hsize = len;
2778
2779                if (!hsize && i >= nfrags) {
2780                        BUG_ON(fskb->len != len);
2781
2782                        pos += len;
2783                        nskb = skb_clone(fskb, GFP_ATOMIC);
2784                        fskb = fskb->next;
2785
2786                        if (unlikely(!nskb))
2787                                goto err;
2788
2789                        hsize = skb_end_offset(nskb);
2790                        if (skb_cow_head(nskb, doffset + headroom)) {
2791                                kfree_skb(nskb);
2792                                goto err;
2793                        }
2794
2795                        nskb->truesize += skb_end_offset(nskb) - hsize;
2796                        skb_release_head_state(nskb);
2797                        __skb_push(nskb, doffset);
2798                } else {
2799                        nskb = __alloc_skb(hsize + doffset + headroom,
2800                                           GFP_ATOMIC, skb_alloc_rx_flag(skb),
2801                                           NUMA_NO_NODE);
2802
2803                        if (unlikely(!nskb))
2804                                goto err;
2805
2806                        skb_reserve(nskb, headroom);
2807                        __skb_put(nskb, doffset);
2808                }
2809
2810                if (segs)
2811                        tail->next = nskb;
2812                else
2813                        segs = nskb;
2814                tail = nskb;
2815
2816                __copy_skb_header(nskb, skb);
2817                nskb->mac_len = skb->mac_len;
2818
2819                /* nskb and skb might have different headroom */
2820                if (nskb->ip_summed == CHECKSUM_PARTIAL)
2821                        nskb->csum_start += skb_headroom(nskb) - headroom;
2822
2823                skb_reset_mac_header(nskb);
2824                skb_set_network_header(nskb, skb->mac_len);
2825                nskb->transport_header = (nskb->network_header +
2826                                          skb_network_header_len(skb));
2827                skb_copy_from_linear_data(skb, nskb->data, doffset);
2828
2829                if (fskb != skb_shinfo(skb)->frag_list)
2830                        continue;
2831
2832                if (!sg) {
2833                        nskb->ip_summed = CHECKSUM_NONE;
2834                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
2835                                                            skb_put(nskb, len),
2836                                                            len, 0);
2837                        continue;
2838                }
2839
2840                frag = skb_shinfo(nskb)->frags;
2841
2842                skb_copy_from_linear_data_offset(skb, offset,
2843                                                 skb_put(nskb, hsize), hsize);
2844
2845                while (pos < offset + len && i < nfrags) {
2846                        *frag = skb_shinfo(skb)->frags[i];
2847                        __skb_frag_ref(frag);
2848                        size = skb_frag_size(frag);
2849
2850                        if (pos < offset) {
2851                                frag->page_offset += offset - pos;
2852                                skb_frag_size_sub(frag, offset - pos);
2853                        }
2854
2855                        skb_shinfo(nskb)->nr_frags++;
2856
2857                        if (pos + size <= offset + len) {
2858                                i++;
2859                                pos += size;
2860                        } else {
2861                                skb_frag_size_sub(frag, pos + size - (offset + len));
2862                                goto skip_fraglist;
2863                        }
2864
2865                        frag++;
2866                }
2867
2868                if (pos < offset + len) {
2869                        struct sk_buff *fskb2 = fskb;
2870
2871                        BUG_ON(pos + fskb->len != offset + len);
2872
2873                        pos += fskb->len;
2874                        fskb = fskb->next;
2875
2876                        if (fskb2->next) {
2877                                fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2878                                if (!fskb2)
2879                                        goto err;
2880                        } else
2881                                skb_get(fskb2);
2882
2883                        SKB_FRAG_ASSERT(nskb);
2884                        skb_shinfo(nskb)->frag_list = fskb2;
2885                }
2886
2887skip_fraglist:
2888                nskb->data_len = len - hsize;
2889                nskb->len += nskb->data_len;
2890                nskb->truesize += nskb->data_len;
2891        } while ((offset += len) < skb->len);
2892
2893        return segs;
2894
2895err:
2896        while ((skb = segs)) {
2897                segs = skb->next;
2898                kfree_skb(skb);
2899        }
2900        return ERR_PTR(err);
2901}
2902EXPORT_SYMBOL_GPL(skb_segment);
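
/*
 * Usage sketch for skb_segment() (illustrative only; example_xmit() is
 * a hypothetical transmit hook). Callers walk the returned list,
 * unlink each segment and hand it on, then drop the original skb:
 *
 *      struct sk_buff *segs, *nskb;
 *
 *      segs = skb_segment(skb, features);
 *      if (IS_ERR(segs))
 *              return PTR_ERR(segs);
 *      consume_skb(skb);
 *      while (segs) {
 *              nskb = segs;
 *              segs = segs->next;
 *              nskb->next = NULL;
 *              example_xmit(nskb);
 *      }
 */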
2903
2904int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2905{
2906        struct sk_buff *p = *head;
2907        struct sk_buff *nskb;
2908        struct skb_shared_info *skbinfo = skb_shinfo(skb);
2909        struct skb_shared_info *pinfo = skb_shinfo(p);
2910        unsigned int headroom;
2911        unsigned int len = skb_gro_len(skb);
2912        unsigned int offset = skb_gro_offset(skb);
2913        unsigned int headlen = skb_headlen(skb);
2914        unsigned int delta_truesize;
2915
2916        if (p->len + len >= 65536)
2917                return -E2BIG;
2918
2919        if (pinfo->frag_list)
2920                goto merge;
2921        else if (headlen <= offset) {
2922                skb_frag_t *frag;
2923                skb_frag_t *frag2;
2924                int i = skbinfo->nr_frags;
2925                int nr_frags = pinfo->nr_frags + i;
2926
2927                offset -= headlen;
2928
2929                if (nr_frags > MAX_SKB_FRAGS)
2930                        return -E2BIG;
2931
2932                pinfo->nr_frags = nr_frags;
2933                skbinfo->nr_frags = 0;
2934
2935                frag = pinfo->frags + nr_frags;
2936                frag2 = skbinfo->frags + i;
2937                do {
2938                        *--frag = *--frag2;
2939                } while (--i);
2940
2941                frag->page_offset += offset;
2942                skb_frag_size_sub(frag, offset);
2943
2944                /* all fragments' truesize: remove (head size + sk_buff) */
2945                delta_truesize = skb->truesize -
2946                                 SKB_TRUESIZE(skb_end_offset(skb));
2947
2948                skb->truesize -= skb->data_len;
2949                skb->len -= skb->data_len;
2950                skb->data_len = 0;
2951
2952                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
2953                goto done;
2954        } else if (skb->head_frag) {
2955                int nr_frags = pinfo->nr_frags;
2956                skb_frag_t *frag = pinfo->frags + nr_frags;
2957                struct page *page = virt_to_head_page(skb->head);
2958                unsigned int first_size = headlen - offset;
2959                unsigned int first_offset;
2960
2961                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
2962                        return -E2BIG;
2963
2964                first_offset = skb->data -
2965                               (unsigned char *)page_address(page) +
2966                               offset;
2967
2968                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
2969
2970                frag->page.p      = page;
2971                frag->page_offset = first_offset;
2972                skb_frag_size_set(frag, first_size);
2973
2974                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
2975                /* We don't need to clear skbinfo->nr_frags here */
2976
2977                delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
2978                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
2979                goto done;
2980        } else if (skb_gro_len(p) != pinfo->gso_size)
2981                return -E2BIG;
2982
2983        headroom = skb_headroom(p);
2984        nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2985        if (unlikely(!nskb))
2986                return -ENOMEM;
2987
2988        __copy_skb_header(nskb, p);
2989        nskb->mac_len = p->mac_len;
2990
2991        skb_reserve(nskb, headroom);
2992        __skb_put(nskb, skb_gro_offset(p));
2993
2994        skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2995        skb_set_network_header(nskb, skb_network_offset(p));
2996        skb_set_transport_header(nskb, skb_transport_offset(p));
2997
2998        __skb_pull(p, skb_gro_offset(p));
2999        memcpy(skb_mac_header(nskb), skb_mac_header(p),
3000               p->data - skb_mac_header(p));
3001
3002        *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
3003        skb_shinfo(nskb)->frag_list = p;
3004        skb_shinfo(nskb)->gso_size = pinfo->gso_size;
3005        pinfo->gso_size = 0;
3006        skb_header_release(p);
3007        NAPI_GRO_CB(nskb)->last = p;
3008
3009        nskb->data_len += p->len;
3010        nskb->truesize += p->truesize;
3011        nskb->len += p->len;
3012
3013        *head = nskb;
3014        nskb->next = p->next;
3015        p->next = NULL;
3016
3017        p = nskb;
3018
3019merge:
3020        delta_truesize = skb->truesize;
3021        if (offset > headlen) {
3022                unsigned int eat = offset - headlen;
3023
3024                skbinfo->frags[0].page_offset += eat;
3025                skb_frag_size_sub(&skbinfo->frags[0], eat);
3026                skb->data_len -= eat;
3027                skb->len -= eat;
3028                offset = headlen;
3029        }
3030
3031        __skb_pull(skb, offset);
3032
3033        NAPI_GRO_CB(p)->last->next = skb;
3034        NAPI_GRO_CB(p)->last = skb;
3035        skb_header_release(skb);
3036
3037done:
3038        NAPI_GRO_CB(p)->count++;
3039        p->data_len += len;
3040        p->truesize += delta_truesize;
3041        p->len += len;
3042
3043        NAPI_GRO_CB(skb)->same_flow = 1;
3044        return 0;
3045}
3046EXPORT_SYMBOL_GPL(skb_gro_receive);
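
/*
 * Editor's note: a minimal sketch (not part of the original file) of how
 * a protocol gro_receive handler uses skb_gro_receive() once it has found
 * a same-flow candidate on the GRO list; cf. tcp_gro_receive().  The
 * helper name is hypothetical.
 */
static int example_gro_merge(struct sk_buff **head, struct sk_buff *skb)
{
        /* On success (0), skb's payload has been appended to *head and
         * NAPI_GRO_CB(skb)->same_flow has been set.  A negative return
         * (e.g. -E2BIG above) tells the caller to flush instead.
         */
        return skb_gro_receive(head, skb);
}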
3047
3048void __init skb_init(void)
3049{
3050        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3051                                              sizeof(struct sk_buff),
3052                                              0,
3053                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3054                                              NULL);
3055        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3056                                                (2*sizeof(struct sk_buff)) +
3057                                                sizeof(atomic_t),
3058                                                0,
3059                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3060                                                NULL);
3061}
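
/*
 * Editor's note: the skbuff_fclone_cache object size above (two sk_buffs
 * plus an atomic_t) implies the layout sketched below.  The struct is
 * purely illustrative; this kernel computes the size arithmetically
 * rather than declaring such a type.
 */
struct example_fclone_layout {
        struct sk_buff  skb1;           /* the original skb */
        struct sk_buff  skb2;           /* its preallocated fast clone */
        atomic_t        fclone_ref;     /* refcount shared by the pair */
};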
3062
3063/**
3064 *      skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3065 *      @skb: Socket buffer containing the buffers to be mapped
3066 *      @sg: The scatter-gather list to map into
3067 *      @offset: The offset into the buffer's contents to start mapping
3068 *      @len: Length of buffer space to be mapped
3069 *
3070 *      Fill the specified scatter-gather list with mappings/pointers into a
3071 *      region of the buffer space attached to a socket buffer.
3072 */
3073static int
3074__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3075{
3076        int start = skb_headlen(skb);
3077        int i, copy = start - offset;
3078        struct sk_buff *frag_iter;
3079        int elt = 0;
3080
3081        if (copy > 0) {
3082                if (copy > len)
3083                        copy = len;
3084                sg_set_buf(sg, skb->data + offset, copy);
3085                elt++;
3086                if ((len -= copy) == 0)
3087                        return elt;
3088                offset += copy;
3089        }
3090
3091        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3092                int end;
3093
3094                WARN_ON(start > offset + len);
3095
3096                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3097                if ((copy = end - offset) > 0) {
3098                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3099
3100                        if (copy > len)
3101                                copy = len;
3102                        sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3103                                        frag->page_offset+offset-start);
3104                        elt++;
3105                        if (!(len -= copy))
3106                                return elt;
3107                        offset += copy;
3108                }
3109                start = end;
3110        }
3111
3112        skb_walk_frags(skb, frag_iter) {
3113                int end;
3114
3115                WARN_ON(start > offset + len);
3116
3117                end = start + frag_iter->len;
3118                if ((copy = end - offset) > 0) {
3119                        if (copy > len)
3120                                copy = len;
3121                        elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3122                                              copy);
3123                        if ((len -= copy) == 0)
3124                                return elt;
3125                        offset += copy;
3126                }
3127                start = end;
3128        }
3129        BUG_ON(len);
3130        return elt;
3131}
3132
3133int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3134{
3135        int nsg = __skb_to_sgvec(skb, sg, offset, len);
3136
3137        sg_mark_end(&sg[nsg - 1]);
3138
3139        return nsg;
3140}
3141EXPORT_SYMBOL_GPL(skb_to_sgvec);
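
/*
 * Editor's note: a minimal usage sketch (not part of the original file)
 * for skb_to_sgvec(), in the style of the IPsec transforms that consume
 * it.  The helper name and max_elems parameter are hypothetical; real
 * callers typically size the table from skb_cow_data()'s return value.
 */
static int example_map_skb(struct sk_buff *skb, struct scatterlist *sg,
                           int max_elems)
{
        int nsg;

        sg_init_table(sg, max_elems);
        nsg = skb_to_sgvec(skb, sg, 0, skb->len);
        /* sg[0..nsg-1] now describe the skb's data and the last entry
         * has been marked as the end of the list.
         */
        return nsg;
}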
3142
3143/**
3144 *      skb_cow_data - Check that a socket buffer's data buffers are writable
3145 *      @skb: The socket buffer to check.
3146 *      @tailbits: Amount of trailing space to be added
3147 *      @trailer: Returned pointer to the skb where the @tailbits space begins
3148 *
3149 *      Make sure that the data buffers attached to a socket buffer are
3150 *      writable. If they are not, private copies are made of the data buffers
3151 *      and the socket buffer is set to use these instead.
3152 *
3153 *      If @tailbits is given, make sure that there is space to write @tailbits
3154 *      bytes of data beyond current end of socket buffer.  @trailer will be
3155 *      set to point to the skb in which this space begins.
3156 *
3157 *      The number of scatterlist elements required to completely map the
3158 *      COW'd and extended socket buffer will be returned.
3159 */
3160int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3161{
3162        int copyflag;
3163        int elt;
3164        struct sk_buff *skb1, **skb_p;
3165
3166        /* If the skb is cloned or its head is paged, reallocate the
3167         * head, pulling all paged data into it (pages are considered
3168         * not writable at the moment, even if they are anonymous).
3169         */
3170        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3171            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3172                return -ENOMEM;
3173
3174        /* Easy case. Most packets will go this way. */
3175        if (!skb_has_frag_list(skb)) {
3176                /* A little trouble: not enough space for the trailer.
3177                 * This should not happen when the stack is tuned to
3178                 * generate good frames. On a miss we reallocate and
3179                 * reserve even more space; 128 bytes is fair. */
3180
3181                if (skb_tailroom(skb) < tailbits &&
3182                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3183                        return -ENOMEM;
3184
3185                /* Voila! */
3186                *trailer = skb;
3187                return 1;
3188        }
3189
3190        /* Misery: we are in trouble and must mince the fragments... */
3191
3192        elt = 1;
3193        skb_p = &skb_shinfo(skb)->frag_list;
3194        copyflag = 0;
3195
3196        while ((skb1 = *skb_p) != NULL) {
3197                int ntail = 0;
3198
3199                /* The fragment was partially pulled by someone;
3200                 * this can happen on input. Copy it and everything
3201                 * after it. */
3202
3203                if (skb_shared(skb1))
3204                        copyflag = 1;
3205
3206                /* If this skb is the last one, worry about the trailer. */
3207
3208                if (skb1->next == NULL && tailbits) {
3209                        if (skb_shinfo(skb1)->nr_frags ||
3210                            skb_has_frag_list(skb1) ||
3211                            skb_tailroom(skb1) < tailbits)
3212                                ntail = tailbits + 128;
3213                }
3214
3215                if (copyflag ||
3216                    skb_cloned(skb1) ||
3217                    ntail ||
3218                    skb_shinfo(skb1)->nr_frags ||
3219                    skb_has_frag_list(skb1)) {
3220                        struct sk_buff *skb2;
3221
3222                        /* No way around it: a private copy must be made. */
3223                        if (ntail == 0)
3224                                skb2 = skb_copy(skb1, GFP_ATOMIC);
3225                        else
3226                                skb2 = skb_copy_expand(skb1,
3227                                                       skb_headroom(skb1),
3228                                                       ntail,
3229                                                       GFP_ATOMIC);
3230                        if (unlikely(skb2 == NULL))
3231                                return -ENOMEM;
3232
3233                        if (skb1->sk)
3234                                skb_set_owner_w(skb2, skb1->sk);
3235
3236                        /* Still alive? Good: link in the new skb
3237                         * and drop the old one. */
3238
3239                        skb2->next = skb1->next;
3240                        *skb_p = skb2;
3241                        kfree_skb(skb1);
3242                        skb1 = skb2;
3243                }
3244                elt++;
3245                *trailer = skb1;
3246                skb_p = &skb1->next;
3247        }
3248
3249        return elt;
3250}
3251EXPORT_SYMBOL_GPL(skb_cow_data);
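
/*
 * Editor's note: a hedged sketch of the usual calling pattern (cf. the
 * IPsec ESP output path): make the chain writable, reserving room for a
 * trailer, then write at the returned skb.  The helper is hypothetical.
 */
static int example_make_writable(struct sk_buff *skb, int padlen)
{
        struct sk_buff *trailer;
        int nfrags;

        nfrags = skb_cow_data(skb, padlen, &trailer);
        if (nfrags < 0)
                return nfrags;          /* -ENOMEM */

        /* All buffers are now private, padlen bytes of tailroom are
         * available at skb_tail_pointer(trailer), and nfrags elements
         * suffice for a subsequent skb_to_sgvec().
         */
        return nfrags;
}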
3252
3253static void sock_rmem_free(struct sk_buff *skb)
3254{
3255        struct sock *sk = skb->sk;
3256
3257        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3258}
3259
3260/*
3261 * Note: we don't memory-charge error packets (no sk_forward_alloc changes)
3262 */
3263int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3264{
3265        int len = skb->len;
3266
3267        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3268            (unsigned int)sk->sk_rcvbuf)
3269                return -ENOMEM;
3270
3271        skb_orphan(skb);
3272        skb->sk = sk;
3273        skb->destructor = sock_rmem_free;
3274        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3275
3276        /* before exiting rcu section, make sure dst is refcounted */
3277        skb_dst_force(skb);
3278
3279        skb_queue_tail(&sk->sk_error_queue, skb);
3280        if (!sock_flag(sk, SOCK_DEAD))
3281                sk->sk_data_ready(sk, len);
3282        return 0;
3283}
3284EXPORT_SYMBOL(sock_queue_err_skb);
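
/*
 * Editor's note: a hedged sketch (hypothetical helper) of queueing a
 * locally generated error report for delivery via MSG_ERRQUEUE; it
 * mirrors the pattern skb_tstamp_tx() uses below.
 */
static void example_queue_local_err(struct sock *sk, struct sk_buff *skb)
{
        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

        memset(serr, 0, sizeof(*serr));
        serr->ee.ee_errno = ENOMSG;
        serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;

        if (sock_queue_err_skb(sk, skb))
                kfree_skb(skb);         /* queue full: drop */
}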
3285
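/**
 * skb_tstamp_tx - queue a clone of a sent skb carrying its TX timestamp
 * @orig_skb: the packet that was sent
 * @hwtstamps: hardware time stamps, or NULL for a software timestamp
 *
 * (Editor's addition.) Clones @orig_skb and queues the clone on the
 * sending socket's error queue, tagged SO_EE_ORIGIN_TIMESTAMPING.
 */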
3286void skb_tstamp_tx(struct sk_buff *orig_skb,
3287                struct skb_shared_hwtstamps *hwtstamps)
3288{
3289        struct sock *sk = orig_skb->sk;
3290        struct sock_exterr_skb *serr;
3291        struct sk_buff *skb;
3292        int err;
3293
3294        if (!sk)
3295                return;
3296
3297        skb = skb_clone(orig_skb, GFP_ATOMIC);
3298        if (!skb)
3299                return;
3300
3301        if (hwtstamps) {
3302                *skb_hwtstamps(skb) =
3303                        *hwtstamps;
3304        } else {
3305                /*
3306                 * no hardware time stamps available,
3307                 * so keep the shared tx_flags and only
3308                 * store software time stamp
3309                 */
3310                skb->tstamp = ktime_get_real();
3311        }
3312
3313        serr = SKB_EXT_ERR(skb);
3314        memset(serr, 0, sizeof(*serr));
3315        serr->ee.ee_errno = ENOMSG;
3316        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3317
3318        err = sock_queue_err_skb(sk, skb);
3319
3320        if (err)
3321                kfree_skb(skb);
3322}
3323EXPORT_SYMBOL_GPL(skb_tstamp_tx);
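
/*
 * Editor's note: a sketch of the driver side.  A NIC with hardware TX
 * timestamping would call skb_tstamp_tx() from its completion path when
 * a timestamp was requested.  The helper and its hw_ns parameter are
 * hypothetical; only the skb API calls are real.
 */
static void example_tx_complete(struct sk_buff *skb, u64 hw_ns)
{
        if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
                struct skb_shared_hwtstamps hwts = {
                        .hwtstamp = ns_to_ktime(hw_ns),
                };

                skb_tstamp_tx(skb, &hwts);
        }
        dev_kfree_skb_any(skb);
}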
3324
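/**
 * skb_complete_wifi_ack - report Wi-Fi transmit status to the socket
 * @skb: the original status skb
 * @acked: whether the frame was ACKed over the air
 *
 * (Editor's addition.) Queues @skb on the owning socket's error queue
 * with origin SO_EE_ORIGIN_TXSTATUS, or frees it if queueing fails.
 */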
3325void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3326{
3327        struct sock *sk = skb->sk;
3328        struct sock_exterr_skb *serr;
3329        int err;
3330
3331        skb->wifi_acked_valid = 1;
3332        skb->wifi_acked = acked;
3333
3334        serr = SKB_EXT_ERR(skb);
3335        memset(serr, 0, sizeof(*serr));
3336        serr->ee.ee_errno = ENOMSG;
3337        serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3338
3339        err = sock_queue_err_skb(sk, skb);
3340        if (err)
3341                kfree_skb(skb);
3342}
3343EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3344
3345
3346/**
3347 * skb_partial_csum_set - set up and verify partial csum values for packet
3348 * @skb: the skb to set
3349 * @start: the number of bytes after skb->data to start checksumming.
3350 * @off: the offset from start to place the checksum.
3351 *
3352 * For untrusted partially-checksummed packets, we need to make sure the values
3353 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3354 *
3355 * This function checks and sets those values and skb->ip_summed: if this
3356 * returns false you should drop the packet.
3357 */
3358bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3359{
3360        if (unlikely(start > skb_headlen(skb)) ||
3361            unlikely((int)start + off > skb_headlen(skb) - 2)) {
3362                net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3363                                     start, off, skb_headlen(skb));
3364                return false;
3365        }
3366        skb->ip_summed = CHECKSUM_PARTIAL;
3367        skb->csum_start = skb_headroom(skb) + start;
3368        skb->csum_offset = off;
3369        return true;
3370}
3371EXPORT_SYMBOL_GPL(skb_partial_csum_set);
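
/*
 * Editor's note: a hedged usage sketch -- validating checksum metadata
 * supplied by an untrusted source (for example a virtio-style header)
 * before accepting the packet.  The helper name is hypothetical.
 */
static int example_accept_partial_csum(struct sk_buff *skb,
                                       u16 csum_start, u16 csum_off)
{
        if (!skb_partial_csum_set(skb, csum_start, csum_off))
                return -EINVAL;         /* caller should drop the packet */
        return 0;
}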
3372
3373void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3374{
3375        net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
3376                             skb->dev->name);
3377}
3378EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3379
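/**
 * kfree_skb_partial - free an skb whose head may have been stolen
 * @skb: buffer to free
 * @head_stolen: true if the head data was taken over by the caller (as
 *               in skb_try_coalesce() below) and must not be freed here
 *
 * (Editor's addition.) When the head was stolen, only the skb state and
 * the sk_buff structure itself are released; otherwise the whole buffer
 * is freed.
 */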
3380void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
3381{
3382        if (head_stolen) {
3383                skb_release_head_state(skb);
3384                kmem_cache_free(skbuff_head_cache, skb);
3385        } else {
3386                __kfree_skb(skb);
3387        }
3388}
3389EXPORT_SYMBOL(kfree_skb_partial);
3390
3391/**
3392 * skb_try_coalesce - try to merge skb to prior one
3393 * @to: prior buffer
3394 * @from: buffer to add
3395 * @fragstolen: set to true if @from's head was stolen into a fragment of @to
3396 * @delta_truesize: returns how much @to's truesize grew during the merge
3397 */
3398bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3399                      bool *fragstolen, int *delta_truesize)
3400{
3401        int i, delta, len = from->len;
3402
3403        *fragstolen = false;
3404
3405        if (skb_cloned(to))
3406                return false;
3407
3408        if (len <= skb_tailroom(to)) {
3409                BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
3410                *delta_truesize = 0;
3411                return true;
3412        }
3413
3414        if (skb_has_frag_list(to) || skb_has_frag_list(from))
3415                return false;
3416
3417        if (skb_headlen(from) != 0) {
3418                struct page *page;
3419                unsigned int offset;
3420
3421                if (skb_shinfo(to)->nr_frags +
3422                    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
3423                        return false;
3424
3425                if (skb_head_is_locked(from))
3426                        return false;
3427
3428                delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3429
3430                page = virt_to_head_page(from->head);
3431                offset = from->data - (unsigned char *)page_address(page);
3432
3433                skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
3434                                   page, offset, skb_headlen(from));
3435                *fragstolen = true;
3436        } else {
3437                if (skb_shinfo(to)->nr_frags +
3438                    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
3439                        return false;
3440
3441                delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
3442        }
3443
3444        WARN_ON_ONCE(delta < len);
3445
3446        memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
3447               skb_shinfo(from)->frags,
3448               skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
3449        skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
3450
3451        if (!skb_cloned(from))
3452                skb_shinfo(from)->nr_frags = 0;
3453
3454        /* If the skb was not cloned, this loop does nothing,
3455         * since we set nr_frags to 0 above.
3456         */
3457        for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
3458                skb_frag_ref(from, i);
3459
3460        to->truesize += delta;
3461        to->len += len;
3462        to->data_len += len;
3463
3464        *delta_truesize = delta;
3465        return true;
3466}
3467EXPORT_SYMBOL(skb_try_coalesce);
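
/*
 * Editor's note: the caller-side pattern, loosely after TCP receive
 * queue coalescing (hypothetical helper).  On success @from's data now
 * lives in @to, so @from is released with kfree_skb_partial() above;
 * real callers also charge the returned delta to socket accounting.
 */
static bool example_coalesce(struct sk_buff *to, struct sk_buff *from)
{
        bool fragstolen;
        int delta;

        if (!skb_try_coalesce(to, from, &fragstolen, &delta))
                return false;

        kfree_skb_partial(from, fragstolen);
        return true;
}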
3468