linux/net/core/skbuff.c
   1/*
   2 *      Routines having to do with the 'struct sk_buff' memory handlers.
   3 *
   4 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
   5 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
   6 *
   7 *      Fixes:
   8 *              Alan Cox        :       Fixed the worst of the load
   9 *                                      balancer bugs.
  10 *              Dave Platt      :       Interrupt stacking fix.
   11 *              Richard Kooijman:       Timestamp fixes.
  12 *              Alan Cox        :       Changed buffer format.
  13 *              Alan Cox        :       destructor hook for AF_UNIX etc.
  14 *              Linus Torvalds  :       Better skb_clone.
  15 *              Alan Cox        :       Added skb_copy.
  16 *              Alan Cox        :       Added all the changed routines Linus
  17 *                                      only put in the headers
  18 *              Ray VanTassle   :       Fixed --skb->lock in free
  19 *              Alan Cox        :       skb_copy copy arp field
  20 *              Andi Kleen      :       slabified it.
  21 *              Robert Olsson   :       Removed skb_head_pool
  22 *
  23 *      NOTE:
  24 *              The __skb_ routines should be called with interrupts
  25 *      disabled, or you better be *real* sure that the operation is atomic
  26 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
  27 *      or via disabling bottom half handlers, etc).
  28 *
  29 *      This program is free software; you can redistribute it and/or
  30 *      modify it under the terms of the GNU General Public License
  31 *      as published by the Free Software Foundation; either version
  32 *      2 of the License, or (at your option) any later version.
  33 */
  34
  35/*
  36 *      The functions in this file will not compile correctly with gcc 2.4.x
  37 */
  38
  39#include <linux/module.h>
  40#include <linux/types.h>
  41#include <linux/kernel.h>
  42#include <linux/mm.h>
  43#include <linux/interrupt.h>
  44#include <linux/in.h>
  45#include <linux/inet.h>
  46#include <linux/slab.h>
  47#include <linux/netdevice.h>
  48#ifdef CONFIG_NET_CLS_ACT
  49#include <net/pkt_sched.h>
  50#endif
  51#include <linux/string.h>
  52#include <linux/skbuff.h>
  53#include <linux/splice.h>
  54#include <linux/cache.h>
  55#include <linux/rtnetlink.h>
  56#include <linux/init.h>
  57#include <linux/scatterlist.h>
  58#include <linux/errqueue.h>
  59
  60#include <net/protocol.h>
  61#include <net/dst.h>
  62#include <net/sock.h>
  63#include <net/checksum.h>
  64#include <net/xfrm.h>
  65
  66#include <asm/uaccess.h>
  67#include <asm/system.h>
  68#include <trace/skb.h>
  69
  70#include "kmap_skb.h"
  71
  72static struct kmem_cache *skbuff_head_cache __read_mostly;
  73static struct kmem_cache *skbuff_fclone_cache __read_mostly;
  74
  75static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
  76                                  struct pipe_buffer *buf)
  77{
  78        put_page(buf->page);
  79}
  80
  81static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
  82                                struct pipe_buffer *buf)
  83{
  84        get_page(buf->page);
  85}
  86
  87static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
  88                               struct pipe_buffer *buf)
  89{
  90        return 1;
  91}
  92
  93
  94/* Pipe buffer operations for a socket. */
  95static struct pipe_buf_operations sock_pipe_buf_ops = {
  96        .can_merge = 0,
  97        .map = generic_pipe_buf_map,
  98        .unmap = generic_pipe_buf_unmap,
  99        .confirm = generic_pipe_buf_confirm,
 100        .release = sock_pipe_buf_release,
 101        .steal = sock_pipe_buf_steal,
 102        .get = sock_pipe_buf_get,
 103};
 104
 105/*
 106 *      Keep out-of-line to prevent kernel bloat.
 107 *      __builtin_return_address is not used because it is not always
 108 *      reliable.
 109 */
 110
 111/**
 112 *      skb_over_panic  -       private function
 113 *      @skb: buffer
 114 *      @sz: size
 115 *      @here: address
 116 *
 117 *      Out of line support code for skb_put(). Not user callable.
 118 */
 119void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 120{
 121        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
 122                          "data:%p tail:%#lx end:%#lx dev:%s\n",
 123               here, skb->len, sz, skb->head, skb->data,
 124               (unsigned long)skb->tail, (unsigned long)skb->end,
 125               skb->dev ? skb->dev->name : "<NULL>");
 126        BUG();
 127}
 128EXPORT_SYMBOL(skb_over_panic);
 129
 130/**
 131 *      skb_under_panic -       private function
 132 *      @skb: buffer
 133 *      @sz: size
 134 *      @here: address
 135 *
 136 *      Out of line support code for skb_push(). Not user callable.
 137 */
 138
 139void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 140{
 141        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
 142                          "data:%p tail:%#lx end:%#lx dev:%s\n",
 143               here, skb->len, sz, skb->head, skb->data,
 144               (unsigned long)skb->tail, (unsigned long)skb->end,
 145               skb->dev ? skb->dev->name : "<NULL>");
 146        BUG();
 147}
 148EXPORT_SYMBOL(skb_under_panic);
 149
 150/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 151 *      'private' fields and also do memory statistics to find all the
 152 *      [BEEP] leaks.
 153 *
 154 */
 155
 156/**
 157 *      __alloc_skb     -       allocate a network buffer
 158 *      @size: size to allocate
 159 *      @gfp_mask: allocation mask
 160 *      @fclone: allocate from fclone cache instead of head cache
 161 *              and allocate a cloned (child) skb
 162 *      @node: numa node to allocate memory on
 163 *
  164 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
  165 *      tail room of @size bytes. The object has a reference count of one.
  166 *      On success the return is the buffer; on failure it is %NULL.
 167 *
 168 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 169 *      %GFP_ATOMIC.
 170 */
 171struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 172                            int fclone, int node)
 173{
 174        struct kmem_cache *cache;
 175        struct skb_shared_info *shinfo;
 176        struct sk_buff *skb;
 177        u8 *data;
 178
 179        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 180
 181        /* Get the HEAD */
 182        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 183        if (!skb)
 184                goto out;
 185
 186        size = SKB_DATA_ALIGN(size);
 187        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
 188                        gfp_mask, node);
 189        if (!data)
 190                goto nodata;
 191
 192        /*
 193         * Only clear those fields we need to clear, not those that we will
 194         * actually initialise below. Hence, don't put any more fields after
 195         * the tail pointer in struct sk_buff!
 196         */
 197        memset(skb, 0, offsetof(struct sk_buff, tail));
 198        skb->truesize = size + sizeof(struct sk_buff);
 199        atomic_set(&skb->users, 1);
 200        skb->head = data;
 201        skb->data = data;
 202        skb_reset_tail_pointer(skb);
 203        skb->end = skb->tail + size;
 204        /* make sure we initialize shinfo sequentially */
 205        shinfo = skb_shinfo(skb);
 206        atomic_set(&shinfo->dataref, 1);
 207        shinfo->nr_frags  = 0;
 208        shinfo->gso_size = 0;
 209        shinfo->gso_segs = 0;
 210        shinfo->gso_type = 0;
 211        shinfo->ip6_frag_id = 0;
 212        shinfo->tx_flags.flags = 0;
 213        shinfo->frag_list = NULL;
 214        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 215
 216        if (fclone) {
 217                struct sk_buff *child = skb + 1;
 218                atomic_t *fclone_ref = (atomic_t *) (child + 1);
 219
 220                skb->fclone = SKB_FCLONE_ORIG;
 221                atomic_set(fclone_ref, 1);
 222
 223                child->fclone = SKB_FCLONE_UNAVAILABLE;
 224        }
 225out:
 226        return skb;
 227nodata:
 228        kmem_cache_free(cache, skb);
 229        skb = NULL;
 230        goto out;
 231}
 232EXPORT_SYMBOL(__alloc_skb);
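
/*
 * Illustrative sketch, not part of this file: a typical caller goes
 * through the alloc_skb() wrapper rather than __alloc_skb() directly.
 * The 128-byte headroom and 64-byte payload are made-up example values.
 */
static struct sk_buff *example_build_skb(void)
{
	struct sk_buff *skb;

	skb = alloc_skb(128 + 64, GFP_ATOMIC);	/* headroom + payload */
	if (!skb)
		return NULL;

	skb_reserve(skb, 128);			/* leave room for headers */
	memset(skb_put(skb, 64), 0, 64);	/* append 64 zeroed bytes */
	return skb;
}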
 233
 234/**
 235 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 236 *      @dev: network device to receive on
 237 *      @length: length to allocate
 238 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 239 *
 240 *      Allocate a new &sk_buff and assign it a usage count of one. The
 241 *      buffer has unspecified headroom built in. Users should allocate
  242 *      the headroom they think they need without accounting for the
  243 *      built-in space. The built-in space is used for optimisations.
 244 *
 245 *      %NULL is returned if there is no free memory.
 246 */
 247struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 248                unsigned int length, gfp_t gfp_mask)
 249{
 250        int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
 251        struct sk_buff *skb;
 252
 253        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
 254        if (likely(skb)) {
 255                skb_reserve(skb, NET_SKB_PAD);
 256                skb->dev = dev;
 257        }
 258        return skb;
 259}
 260EXPORT_SYMBOL(__netdev_alloc_skb);
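
/*
 * Illustrative sketch, not part of this file: receive paths normally use
 * the netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC for them.
 * The 1536-byte buffer length is a made-up example value.
 */
static struct sk_buff *example_rx_alloc(struct net_device *dev)
{
	/* NET_SKB_PAD bytes of headroom are added and reserved internally */
	return netdev_alloc_skb(dev, 1536);
}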
 261
 262struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
 263{
 264        int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
 265        struct page *page;
 266
 267        page = alloc_pages_node(node, gfp_mask, 0);
 268        return page;
 269}
 270EXPORT_SYMBOL(__netdev_alloc_page);
 271
 272void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 273                int size)
 274{
 275        skb_fill_page_desc(skb, i, page, off, size);
 276        skb->len += size;
 277        skb->data_len += size;
 278        skb->truesize += size;
 279}
 280EXPORT_SYMBOL(skb_add_rx_frag);
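
/*
 * Illustrative sketch, not part of this file: attaching a freshly
 * allocated page as frag slot 0 of an skb that owns no frags yet.
 */
static int example_attach_frag(struct sk_buff *skb)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	/* the frag takes over the page reference; len, data_len and
	 * truesize are all updated by skb_add_rx_frag() */
	skb_add_rx_frag(skb, 0, page, 0, PAGE_SIZE);
	return 0;
}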
 281
 282/**
 283 *      dev_alloc_skb - allocate an skbuff for receiving
 284 *      @length: length to allocate
 285 *
 286 *      Allocate a new &sk_buff and assign it a usage count of one. The
 287 *      buffer has unspecified headroom built in. Users should allocate
  288 *      the headroom they think they need without accounting for the
  289 *      built-in space. The built-in space is used for optimisations.
 290 *
 291 *      %NULL is returned if there is no free memory. Although this function
 292 *      allocates memory it can be called from an interrupt.
 293 */
 294struct sk_buff *dev_alloc_skb(unsigned int length)
 295{
 296        /*
 297         * There is more code here than it seems:
 298         * __dev_alloc_skb is an inline
 299         */
 300        return __dev_alloc_skb(length, GFP_ATOMIC);
 301}
 302EXPORT_SYMBOL(dev_alloc_skb);
 303
 304static void skb_drop_list(struct sk_buff **listp)
 305{
 306        struct sk_buff *list = *listp;
 307
 308        *listp = NULL;
 309
 310        do {
 311                struct sk_buff *this = list;
 312                list = list->next;
 313                kfree_skb(this);
 314        } while (list);
 315}
 316
 317static inline void skb_drop_fraglist(struct sk_buff *skb)
 318{
 319        skb_drop_list(&skb_shinfo(skb)->frag_list);
 320}
 321
 322static void skb_clone_fraglist(struct sk_buff *skb)
 323{
 324        struct sk_buff *list;
 325
 326        for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
 327                skb_get(list);
 328}
 329
 330static void skb_release_data(struct sk_buff *skb)
 331{
 332        if (!skb->cloned ||
 333            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
 334                               &skb_shinfo(skb)->dataref)) {
 335                if (skb_shinfo(skb)->nr_frags) {
 336                        int i;
 337                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 338                                put_page(skb_shinfo(skb)->frags[i].page);
 339                }
 340
 341                if (skb_shinfo(skb)->frag_list)
 342                        skb_drop_fraglist(skb);
 343
 344                kfree(skb->head);
 345        }
 346}
 347
 348/*
  349 *      Free the memory of an skbuff without cleaning its state.
 350 */
 351static void kfree_skbmem(struct sk_buff *skb)
 352{
 353        struct sk_buff *other;
 354        atomic_t *fclone_ref;
 355
 356        switch (skb->fclone) {
 357        case SKB_FCLONE_UNAVAILABLE:
 358                kmem_cache_free(skbuff_head_cache, skb);
 359                break;
 360
 361        case SKB_FCLONE_ORIG:
 362                fclone_ref = (atomic_t *) (skb + 2);
 363                if (atomic_dec_and_test(fclone_ref))
 364                        kmem_cache_free(skbuff_fclone_cache, skb);
 365                break;
 366
 367        case SKB_FCLONE_CLONE:
 368                fclone_ref = (atomic_t *) (skb + 1);
 369                other = skb - 1;
 370
 371                /* The clone portion is available for
 372                 * fast-cloning again.
 373                 */
 374                skb->fclone = SKB_FCLONE_UNAVAILABLE;
 375
 376                if (atomic_dec_and_test(fclone_ref))
 377                        kmem_cache_free(skbuff_fclone_cache, other);
 378                break;
 379        }
 380}
 381
 382static void skb_release_head_state(struct sk_buff *skb)
 383{
 384        dst_release(skb->dst);
 385#ifdef CONFIG_XFRM
 386        secpath_put(skb->sp);
 387#endif
 388        if (skb->destructor) {
 389                WARN_ON(in_irq());
 390                skb->destructor(skb);
 391        }
 392#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 393        nf_conntrack_put(skb->nfct);
 394        nf_conntrack_put_reasm(skb->nfct_reasm);
 395#endif
 396#ifdef CONFIG_BRIDGE_NETFILTER
 397        nf_bridge_put(skb->nf_bridge);
 398#endif
  399/* XXX: Is this still necessary? - JHS */
 400#ifdef CONFIG_NET_SCHED
 401        skb->tc_index = 0;
 402#ifdef CONFIG_NET_CLS_ACT
 403        skb->tc_verd = 0;
 404#endif
 405#endif
 406}
 407
 408/* Free everything but the sk_buff shell. */
 409static void skb_release_all(struct sk_buff *skb)
 410{
 411        skb_release_head_state(skb);
 412        skb_release_data(skb);
 413}
 414
 415/**
 416 *      __kfree_skb - private function
 417 *      @skb: buffer
 418 *
 419 *      Free an sk_buff. Release anything attached to the buffer.
 420 *      Clean the state. This is an internal helper function. Users should
  421 *      always call kfree_skb().
 422 */
 423
 424void __kfree_skb(struct sk_buff *skb)
 425{
 426        skb_release_all(skb);
 427        kfree_skbmem(skb);
 428}
 429EXPORT_SYMBOL(__kfree_skb);
 430
 431/**
 432 *      kfree_skb - free an sk_buff
 433 *      @skb: buffer to free
 434 *
 435 *      Drop a reference to the buffer and free it if the usage count has
 436 *      hit zero.
 437 */
 438void kfree_skb(struct sk_buff *skb)
 439{
 440        if (unlikely(!skb))
 441                return;
 442        if (likely(atomic_read(&skb->users) == 1))
 443                smp_rmb();
 444        else if (likely(!atomic_dec_and_test(&skb->users)))
 445                return;
 446        trace_kfree_skb(skb, __builtin_return_address(0));
 447        __kfree_skb(skb);
 448}
 449EXPORT_SYMBOL(kfree_skb);
 450
 451/**
 452 *      consume_skb - free an skbuff
 453 *      @skb: buffer to free
 454 *
  455 *      Drop a reference to the buffer and free it if the usage count has hit
  456 *      zero. Functions identically to kfree_skb(), but kfree_skb() assumes
  457 *      the frame is being dropped after a failure and notes that (via tracing).
 458 */
 459void consume_skb(struct sk_buff *skb)
 460{
 461        if (unlikely(!skb))
 462                return;
 463        if (likely(atomic_read(&skb->users) == 1))
 464                smp_rmb();
 465        else if (likely(!atomic_dec_and_test(&skb->users)))
 466                return;
 467        __kfree_skb(skb);
 468}
 469EXPORT_SYMBOL(consume_skb);
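
/*
 * Illustrative sketch, not part of this file: a tx completion handler
 * frees successfully sent frames with consume_skb() and real drops with
 * kfree_skb(), so drop monitoring only ever sees the latter.
 */
static void example_tx_done(struct sk_buff *skb, int sent_ok)
{
	if (sent_ok)
		consume_skb(skb);	/* normal end of life, no drop event */
	else
		kfree_skb(skb);		/* recorded by the kfree_skb tracepoint */
}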
 470
 471/**
 472 *      skb_recycle_check - check if skb can be reused for receive
 473 *      @skb: buffer
 474 *      @skb_size: minimum receive buffer size
 475 *
  476 *      Checks that the skb passed in is not shared or cloned, that it
  477 *      is linear, and that its head portion is at least as large as
  478 *      @skb_size, so that it can be recycled as a receive buffer.
 479 *      If these conditions are met, this function does any necessary
 480 *      reference count dropping and cleans up the skbuff as if it
 481 *      just came from __alloc_skb().
 482 */
 483int skb_recycle_check(struct sk_buff *skb, int skb_size)
 484{
 485        struct skb_shared_info *shinfo;
 486
 487        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
 488                return 0;
 489
 490        skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 491        if (skb_end_pointer(skb) - skb->head < skb_size)
 492                return 0;
 493
 494        if (skb_shared(skb) || skb_cloned(skb))
 495                return 0;
 496
 497        skb_release_head_state(skb);
 498        shinfo = skb_shinfo(skb);
 499        atomic_set(&shinfo->dataref, 1);
 500        shinfo->nr_frags = 0;
 501        shinfo->gso_size = 0;
 502        shinfo->gso_segs = 0;
 503        shinfo->gso_type = 0;
 504        shinfo->ip6_frag_id = 0;
 505        shinfo->tx_flags.flags = 0;
 506        shinfo->frag_list = NULL;
 507        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));
 508
 509        memset(skb, 0, offsetof(struct sk_buff, tail));
 510        skb->data = skb->head + NET_SKB_PAD;
 511        skb_reset_tail_pointer(skb);
 512
 513        return 1;
 514}
 515EXPORT_SYMBOL(skb_recycle_check);
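
/*
 * Illustrative sketch, not part of this file: a driver trying to reuse a
 * transmitted skb as its next receive buffer. The 1536-byte size and the
 * rx_slot pointer are hypothetical driver details.
 */
static void example_recycle(struct sk_buff *skb, struct sk_buff **rx_slot)
{
	if (!*rx_slot && skb_recycle_check(skb, 1536))
		*rx_slot = skb;		/* skb is now as if freshly allocated */
	else
		dev_kfree_skb_any(skb);	/* cannot recycle, free normally */
}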
 516
 517static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 518{
 519        new->tstamp             = old->tstamp;
 520        new->dev                = old->dev;
 521        new->transport_header   = old->transport_header;
 522        new->network_header     = old->network_header;
 523        new->mac_header         = old->mac_header;
 524        new->dst                = dst_clone(old->dst);
 525#ifdef CONFIG_XFRM
 526        new->sp                 = secpath_get(old->sp);
 527#endif
 528        memcpy(new->cb, old->cb, sizeof(old->cb));
 529        new->csum_start         = old->csum_start;
 530        new->csum_offset        = old->csum_offset;
 531        new->local_df           = old->local_df;
 532        new->pkt_type           = old->pkt_type;
 533        new->ip_summed          = old->ip_summed;
 534        skb_copy_queue_mapping(new, old);
 535        new->priority           = old->priority;
 536#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 537        new->ipvs_property      = old->ipvs_property;
 538#endif
 539        new->protocol           = old->protocol;
 540        new->mark               = old->mark;
 541        __nf_copy(new, old);
 542#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 543    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 544        new->nf_trace           = old->nf_trace;
 545#endif
 546#ifdef CONFIG_NET_SCHED
 547        new->tc_index           = old->tc_index;
 548#ifdef CONFIG_NET_CLS_ACT
 549        new->tc_verd            = old->tc_verd;
 550#endif
 551#endif
 552        new->vlan_tci           = old->vlan_tci;
 553
 554        skb_copy_secmark(new, old);
 555}
 556
 557static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 558{
 559#define C(x) n->x = skb->x
 560
 561        n->next = n->prev = NULL;
 562        n->sk = NULL;
 563        __copy_skb_header(n, skb);
 564
 565        C(len);
 566        C(data_len);
 567        C(mac_len);
 568        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 569        n->cloned = 1;
 570        n->nohdr = 0;
 571        n->destructor = NULL;
 572        C(iif);
 573        C(tail);
 574        C(end);
 575        C(head);
 576        C(data);
 577        C(truesize);
 578#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
 579        C(do_not_encrypt);
 580        C(requeue);
 581#endif
 582        atomic_set(&n->users, 1);
 583
 584        atomic_inc(&(skb_shinfo(skb)->dataref));
 585        skb->cloned = 1;
 586
 587        return n;
 588#undef C
 589}
 590
 591/**
 592 *      skb_morph       -       morph one skb into another
 593 *      @dst: the skb to receive the contents
 594 *      @src: the skb to supply the contents
 595 *
 596 *      This is identical to skb_clone except that the target skb is
 597 *      supplied by the user.
 598 *
 599 *      The target skb is returned upon exit.
 600 */
 601struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 602{
 603        skb_release_all(dst);
 604        return __skb_clone(dst, src);
 605}
 606EXPORT_SYMBOL_GPL(skb_morph);
 607
 608/**
 609 *      skb_clone       -       duplicate an sk_buff
 610 *      @skb: buffer to clone
 611 *      @gfp_mask: allocation priority
 612 *
 613 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
 614 *      copies share the same packet data but not structure. The new
 615 *      buffer has a reference count of 1. If the allocation fails the
 616 *      function returns %NULL otherwise the new buffer is returned.
 617 *
  618 *      If this function is called from an interrupt, @gfp_mask must be
  619 *      %GFP_ATOMIC.
 620 */
 621
 622struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 623{
 624        struct sk_buff *n;
 625
 626        n = skb + 1;
 627        if (skb->fclone == SKB_FCLONE_ORIG &&
 628            n->fclone == SKB_FCLONE_UNAVAILABLE) {
 629                atomic_t *fclone_ref = (atomic_t *) (n + 1);
 630                n->fclone = SKB_FCLONE_CLONE;
 631                atomic_inc(fclone_ref);
 632        } else {
 633                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 634                if (!n)
 635                        return NULL;
 636                n->fclone = SKB_FCLONE_UNAVAILABLE;
 637        }
 638
 639        return __skb_clone(n, skb);
 640}
 641EXPORT_SYMBOL(skb_clone);
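
/*
 * Illustrative sketch, not part of this file: cloning before handing the
 * buffer to a second consumer. Both clones share the packet data, so
 * neither side may write to it without making a private copy first.
 */
static int example_tee(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	netif_rx(clone);	/* the clone carries its own reference */
	return 0;
}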
 642
 643static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 644{
 645#ifndef NET_SKBUFF_DATA_USES_OFFSET
 646        /*
 647         *      Shift between the two data areas in bytes
 648         */
 649        unsigned long offset = new->data - old->data;
 650#endif
 651
 652        __copy_skb_header(new, old);
 653
 654#ifndef NET_SKBUFF_DATA_USES_OFFSET
 655        /* {transport,network,mac}_header are relative to skb->head */
 656        new->transport_header += offset;
 657        new->network_header   += offset;
 658        new->mac_header       += offset;
 659#endif
 660        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 661        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 662        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 663}
 664
 665/**
 666 *      skb_copy        -       create private copy of an sk_buff
 667 *      @skb: buffer to copy
 668 *      @gfp_mask: allocation priority
 669 *
 670 *      Make a copy of both an &sk_buff and its data. This is used when the
 671 *      caller wishes to modify the data and needs a private copy of the
 672 *      data to alter. Returns %NULL on failure or the pointer to the buffer
 673 *      on success. The returned buffer has a reference count of 1.
 674 *
  675 *      As a by-product this function converts a non-linear &sk_buff into a
  676 *      linear one, so the &sk_buff becomes completely private and the caller
  677 *      may modify all the data of the returned buffer. This means the
  678 *      function is not recommended for use in circumstances where only the
  679 *      header is going to be modified. Use pskb_copy() instead.
 680 */
 681
 682struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 683{
 684        int headerlen = skb->data - skb->head;
 685        /*
 686         *      Allocate the copy buffer
 687         */
 688        struct sk_buff *n;
 689#ifdef NET_SKBUFF_DATA_USES_OFFSET
 690        n = alloc_skb(skb->end + skb->data_len, gfp_mask);
 691#else
 692        n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
 693#endif
 694        if (!n)
 695                return NULL;
 696
 697        /* Set the data pointer */
 698        skb_reserve(n, headerlen);
 699        /* Set the tail pointer and length */
 700        skb_put(n, skb->len);
 701
 702        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
 703                BUG();
 704
 705        copy_skb_header(n, skb);
 706        return n;
 707}
 708EXPORT_SYMBOL(skb_copy);
 709
 710/**
 711 *      pskb_copy       -       create copy of an sk_buff with private head.
 712 *      @skb: buffer to copy
 713 *      @gfp_mask: allocation priority
 714 *
  715 *      Make a copy of both an &sk_buff and part of its data, located
  716 *      in the header. Fragmented data remains shared. This is used when
  717 *      the caller wishes to modify only the header of an &sk_buff and
  718 *      needs a private copy of the header to alter. Returns %NULL on
  719 *      failure or the pointer to the buffer on success.
  720 *      The returned buffer has a reference count of 1.
 721 */
 722
 723struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 724{
 725        /*
 726         *      Allocate the copy buffer
 727         */
 728        struct sk_buff *n;
 729#ifdef NET_SKBUFF_DATA_USES_OFFSET
 730        n = alloc_skb(skb->end, gfp_mask);
 731#else
 732        n = alloc_skb(skb->end - skb->head, gfp_mask);
 733#endif
 734        if (!n)
 735                goto out;
 736
 737        /* Set the data pointer */
 738        skb_reserve(n, skb->data - skb->head);
 739        /* Set the tail pointer and length */
 740        skb_put(n, skb_headlen(skb));
 741        /* Copy the bytes */
 742        skb_copy_from_linear_data(skb, n->data, n->len);
 743
 744        n->truesize += skb->data_len;
 745        n->data_len  = skb->data_len;
 746        n->len       = skb->len;
 747
 748        if (skb_shinfo(skb)->nr_frags) {
 749                int i;
 750
 751                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 752                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
 753                        get_page(skb_shinfo(n)->frags[i].page);
 754                }
 755                skb_shinfo(n)->nr_frags = i;
 756        }
 757
 758        if (skb_shinfo(skb)->frag_list) {
 759                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
 760                skb_clone_fraglist(n);
 761        }
 762
 763        copy_skb_header(n, skb);
 764out:
 765        return n;
 766}
 767EXPORT_SYMBOL(pskb_copy);
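
/*
 * Illustrative sketch, not part of this file: choosing between the two
 * copy primitives. The header_only flag is hypothetical.
 */
static struct sk_buff *example_private_copy(struct sk_buff *skb,
					    int header_only)
{
	if (header_only)
		return pskb_copy(skb, GFP_ATOMIC);	/* frags stay shared */
	return skb_copy(skb, GFP_ATOMIC);	/* fully linear and private */
}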
 768
 769/**
 770 *      pskb_expand_head - reallocate header of &sk_buff
 771 *      @skb: buffer to reallocate
 772 *      @nhead: room to add at head
 773 *      @ntail: room to add at tail
 774 *      @gfp_mask: allocation priority
 775 *
  776 *      Expands (or creates an identical copy, if @nhead and @ntail are zero)
  777 *      the header of the skb. The &sk_buff itself is not changed and MUST have
  778 *      a reference count of 1. Returns zero on success, or a negative error
  779 *      code if expansion failed; in the latter case the &sk_buff is unchanged.
  780 *
  781 *      All the pointers pointing into the skb header may change and must be
  782 *      reloaded after a call to this function.
 783 */
 784
 785int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 786                     gfp_t gfp_mask)
 787{
 788        int i;
 789        u8 *data;
 790#ifdef NET_SKBUFF_DATA_USES_OFFSET
 791        int size = nhead + skb->end + ntail;
 792#else
 793        int size = nhead + (skb->end - skb->head) + ntail;
 794#endif
 795        long off;
 796
 797        BUG_ON(nhead < 0);
 798
 799        if (skb_shared(skb))
 800                BUG();
 801
 802        size = SKB_DATA_ALIGN(size);
 803
 804        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 805        if (!data)
 806                goto nodata;
 807
  808        /* Copy only real data... and, alas, the header. This should be
  809         * optimized for the cases when the header is empty. */
 810#ifdef NET_SKBUFF_DATA_USES_OFFSET
 811        memcpy(data + nhead, skb->head, skb->tail);
 812#else
 813        memcpy(data + nhead, skb->head, skb->tail - skb->head);
 814#endif
 815        memcpy(data + size, skb_end_pointer(skb),
 816               sizeof(struct skb_shared_info));
 817
 818        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 819                get_page(skb_shinfo(skb)->frags[i].page);
 820
 821        if (skb_shinfo(skb)->frag_list)
 822                skb_clone_fraglist(skb);
 823
 824        skb_release_data(skb);
 825
 826        off = (data + nhead) - skb->head;
 827
 828        skb->head     = data;
 829        skb->data    += off;
 830#ifdef NET_SKBUFF_DATA_USES_OFFSET
 831        skb->end      = size;
 832        off           = nhead;
 833#else
 834        skb->end      = skb->head + size;
 835#endif
 836        /* {transport,network,mac}_header and tail are relative to skb->head */
 837        skb->tail             += off;
 838        skb->transport_header += off;
 839        skb->network_header   += off;
 840        skb->mac_header       += off;
 841        skb->csum_start       += nhead;
 842        skb->cloned   = 0;
 843        skb->hdr_len  = 0;
 844        skb->nohdr    = 0;
 845        atomic_set(&skb_shinfo(skb)->dataref, 1);
 846        return 0;
 847
 848nodata:
 849        return -ENOMEM;
 850}
 851EXPORT_SYMBOL(pskb_expand_head);
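
/*
 * Illustrative sketch, not part of this file: making room to push a
 * hypothetical 8-byte tunnel header onto a non-shared skb. Any pointers
 * into the skb must be re-derived after pskb_expand_head() succeeds.
 */
static int example_push_header(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 8 || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, 8, 0, GFP_ATOMIC);

		if (err)
			return err;
	}
	memset(skb_push(skb, 8), 0, 8);	/* headroom is now guaranteed */
	return 0;
}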
 852
 853/* Make private copy of skb with writable head and some headroom */
 854
 855struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 856{
 857        struct sk_buff *skb2;
 858        int delta = headroom - skb_headroom(skb);
 859
 860        if (delta <= 0)
 861                skb2 = pskb_copy(skb, GFP_ATOMIC);
 862        else {
 863                skb2 = skb_clone(skb, GFP_ATOMIC);
 864                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
 865                                             GFP_ATOMIC)) {
 866                        kfree_skb(skb2);
 867                        skb2 = NULL;
 868                }
 869        }
 870        return skb2;
 871}
 872EXPORT_SYMBOL(skb_realloc_headroom);
 873
 874/**
 875 *      skb_copy_expand -       copy and expand sk_buff
 876 *      @skb: buffer to copy
 877 *      @newheadroom: new free bytes at head
 878 *      @newtailroom: new free bytes at tail
 879 *      @gfp_mask: allocation priority
 880 *
 881 *      Make a copy of both an &sk_buff and its data and while doing so
 882 *      allocate additional space.
 883 *
 884 *      This is used when the caller wishes to modify the data and needs a
 885 *      private copy of the data to alter as well as more space for new fields.
 886 *      Returns %NULL on failure or the pointer to the buffer
 887 *      on success. The returned buffer has a reference count of 1.
 888 *
 889 *      You must pass %GFP_ATOMIC as the allocation priority if this function
 890 *      is called from an interrupt.
 891 */
 892struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 893                                int newheadroom, int newtailroom,
 894                                gfp_t gfp_mask)
 895{
 896        /*
 897         *      Allocate the copy buffer
 898         */
 899        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
 900                                      gfp_mask);
 901        int oldheadroom = skb_headroom(skb);
 902        int head_copy_len, head_copy_off;
 903        int off;
 904
 905        if (!n)
 906                return NULL;
 907
 908        skb_reserve(n, newheadroom);
 909
 910        /* Set the tail pointer and length */
 911        skb_put(n, skb->len);
 912
 913        head_copy_len = oldheadroom;
 914        head_copy_off = 0;
 915        if (newheadroom <= head_copy_len)
 916                head_copy_len = newheadroom;
 917        else
 918                head_copy_off = newheadroom - head_copy_len;
 919
 920        /* Copy the linear header and data. */
 921        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
 922                          skb->len + head_copy_len))
 923                BUG();
 924
 925        copy_skb_header(n, skb);
 926
 927        off                  = newheadroom - oldheadroom;
 928        n->csum_start       += off;
 929#ifdef NET_SKBUFF_DATA_USES_OFFSET
 930        n->transport_header += off;
 931        n->network_header   += off;
 932        n->mac_header       += off;
 933#endif
 934
 935        return n;
 936}
 937EXPORT_SYMBOL(skb_copy_expand);
 938
 939/**
 940 *      skb_pad                 -       zero pad the tail of an skb
 941 *      @skb: buffer to pad
 942 *      @pad: space to pad
 943 *
 944 *      Ensure that a buffer is followed by a padding area that is zero
 945 *      filled. Used by network drivers which may DMA or transfer data
 946 *      beyond the buffer end onto the wire.
 947 *
  948 *      May return an error in out-of-memory cases. The skb is freed on error.
 949 */
 950
 951int skb_pad(struct sk_buff *skb, int pad)
 952{
 953        int err;
 954        int ntail;
 955
  956        /* If the skbuff is non-linear, tailroom is always zero. */
 957        if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
 958                memset(skb->data+skb->len, 0, pad);
 959                return 0;
 960        }
 961
 962        ntail = skb->data_len + pad - (skb->end - skb->tail);
 963        if (likely(skb_cloned(skb) || ntail > 0)) {
 964                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
 965                if (unlikely(err))
 966                        goto free_skb;
 967        }
 968
 969        /* FIXME: The use of this function with non-linear skb's really needs
 970         * to be audited.
 971         */
 972        err = skb_linearize(skb);
 973        if (unlikely(err))
 974                goto free_skb;
 975
 976        memset(skb->data + skb->len, 0, pad);
 977        return 0;
 978
 979free_skb:
 980        kfree_skb(skb);
 981        return err;
 982}
 983EXPORT_SYMBOL(skb_pad);
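
/*
 * Illustrative sketch, not part of this file: padding a short frame to
 * the 60-byte Ethernet minimum before DMA. On failure the skb has
 * already been freed by skb_pad(), so the caller must not touch it.
 */
static int example_pad_eth(struct sk_buff *skb)
{
	unsigned int pad;

	if (skb->len >= ETH_ZLEN)
		return 0;
	pad = ETH_ZLEN - skb->len;
	if (skb_pad(skb, pad))
		return -ENOMEM;		/* skb_pad() freed the skb */
	__skb_put(skb, pad);		/* account for the zeroed padding */
	return 0;
}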
 984
 985/**
 986 *      skb_put - add data to a buffer
 987 *      @skb: buffer to use
 988 *      @len: amount of data to add
 989 *
 990 *      This function extends the used data area of the buffer. If this would
 991 *      exceed the total buffer size the kernel will panic. A pointer to the
 992 *      first byte of the extra data is returned.
 993 */
 994unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
 995{
 996        unsigned char *tmp = skb_tail_pointer(skb);
 997        SKB_LINEAR_ASSERT(skb);
 998        skb->tail += len;
 999        skb->len  += len;
1000        if (unlikely(skb->tail > skb->end))
1001                skb_over_panic(skb, len, __builtin_return_address(0));
1002        return tmp;
1003}
1004EXPORT_SYMBOL(skb_put);
1005
1006/**
1007 *      skb_push - add data to the start of a buffer
1008 *      @skb: buffer to use
1009 *      @len: amount of data to add
1010 *
1011 *      This function extends the used data area of the buffer at the buffer
1012 *      start. If this would exceed the total buffer headroom the kernel will
1013 *      panic. A pointer to the first byte of the extra data is returned.
1014 */
1015unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1016{
1017        skb->data -= len;
1018        skb->len  += len;
1019        if (unlikely(skb->data<skb->head))
1020                skb_under_panic(skb, len, __builtin_return_address(0));
1021        return skb->data;
1022}
1023EXPORT_SYMBOL(skb_push);
1024
1025/**
1026 *      skb_pull - remove data from the start of a buffer
1027 *      @skb: buffer to use
1028 *      @len: amount of data to remove
1029 *
1030 *      This function removes data from the start of a buffer, returning
1031 *      the memory to the headroom. A pointer to the next data in the buffer
1032 *      is returned. Once the data has been pulled future pushes will overwrite
1033 *      the old data.
1034 */
1035unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1036{
1037        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1038}
1039EXPORT_SYMBOL(skb_pull);
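
/*
 * Illustrative sketch, not part of this file: consuming a hypothetical
 * 4-byte tag from the front of a buffer while parsing. pskb_may_pull()
 * first makes sure the bytes are present in the linear head.
 */
static int example_strip_tag(struct sk_buff *skb, u32 *tag)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;		/* not enough data */
	memcpy(tag, skb->data, 4);	/* read before the data pointer moves */
	skb_pull(skb, 4);
	return 0;
}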
1040
1041/**
1042 *      skb_trim - remove end from a buffer
1043 *      @skb: buffer to alter
1044 *      @len: new length
1045 *
1046 *      Cut the length of a buffer down by removing data from the tail. If
1047 *      the buffer is already under the length specified it is not modified.
1048 *      The skb must be linear.
1049 */
1050void skb_trim(struct sk_buff *skb, unsigned int len)
1051{
1052        if (skb->len > len)
1053                __skb_trim(skb, len);
1054}
1055EXPORT_SYMBOL(skb_trim);
1056
1057/* Trims skb to length len. It can change skb pointers.
1058 */
1059
1060int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1061{
1062        struct sk_buff **fragp;
1063        struct sk_buff *frag;
1064        int offset = skb_headlen(skb);
1065        int nfrags = skb_shinfo(skb)->nr_frags;
1066        int i;
1067        int err;
1068
1069        if (skb_cloned(skb) &&
1070            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1071                return err;
1072
1073        i = 0;
1074        if (offset >= len)
1075                goto drop_pages;
1076
1077        for (; i < nfrags; i++) {
1078                int end = offset + skb_shinfo(skb)->frags[i].size;
1079
1080                if (end < len) {
1081                        offset = end;
1082                        continue;
1083                }
1084
1085                skb_shinfo(skb)->frags[i++].size = len - offset;
1086
1087drop_pages:
1088                skb_shinfo(skb)->nr_frags = i;
1089
1090                for (; i < nfrags; i++)
1091                        put_page(skb_shinfo(skb)->frags[i].page);
1092
1093                if (skb_shinfo(skb)->frag_list)
1094                        skb_drop_fraglist(skb);
1095                goto done;
1096        }
1097
1098        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1099             fragp = &frag->next) {
1100                int end = offset + frag->len;
1101
1102                if (skb_shared(frag)) {
1103                        struct sk_buff *nfrag;
1104
1105                        nfrag = skb_clone(frag, GFP_ATOMIC);
1106                        if (unlikely(!nfrag))
1107                                return -ENOMEM;
1108
1109                        nfrag->next = frag->next;
1110                        kfree_skb(frag);
1111                        frag = nfrag;
1112                        *fragp = frag;
1113                }
1114
1115                if (end < len) {
1116                        offset = end;
1117                        continue;
1118                }
1119
1120                if (end > len &&
1121                    unlikely((err = pskb_trim(frag, len - offset))))
1122                        return err;
1123
1124                if (frag->next)
1125                        skb_drop_list(&frag->next);
1126                break;
1127        }
1128
1129done:
1130        if (len > skb_headlen(skb)) {
1131                skb->data_len -= skb->len - len;
1132                skb->len       = len;
1133        } else {
1134                skb->len       = len;
1135                skb->data_len  = 0;
1136                skb_set_tail_pointer(skb, len);
1137        }
1138
1139        return 0;
1140}
1141EXPORT_SYMBOL(___pskb_trim);
1142
1143/**
1144 *      __pskb_pull_tail - advance tail of skb header
1145 *      @skb: buffer to reallocate
1146 *      @delta: number of bytes to advance tail
1147 *
 1148 *      The function makes sense only on a fragmented &sk_buff:
 1149 *      it expands the header, moving its tail forward and copying the
 1150 *      necessary data from the fragmented part.
 1151 *
 1152 *      The &sk_buff MUST have a reference count of 1.
 1153 *
 1154 *      Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
 1155 *      or the value of the new tail of the skb on success.
1156 *
1157 *      All the pointers pointing into skb header may change and must be
1158 *      reloaded after call to this function.
1159 */
1160
 1161/* Moves the tail of the skb head forward, copying data from the
 1162 * fragmented part when necessary.
 1163 * 1. It may fail due to allocation failure.
1164 * 2. It may change skb pointers.
1165 *
1166 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1167 */
1168unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1169{
 1170        /* If the skb does not have enough free space at the tail, get a new
 1171         * one plus 128 bytes for future expansions. If we have enough
 1172         * room at the tail, reallocate without expansion only if the skb is
 1173         * cloned. */
1174        int i, k, eat = (skb->tail + delta) - skb->end;
1175
1176        if (eat > 0 || skb_cloned(skb)) {
1177                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1178                                     GFP_ATOMIC))
1179                        return NULL;
1180        }
1181
1182        if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1183                BUG();
1184
 1185        /* Optimization: no fragments, no reason to pre-estimate the
 1186         * size of pulled pages. Superb.
1187         */
1188        if (!skb_shinfo(skb)->frag_list)
1189                goto pull_pages;
1190
1191        /* Estimate size of pulled pages. */
1192        eat = delta;
1193        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1194                if (skb_shinfo(skb)->frags[i].size >= eat)
1195                        goto pull_pages;
1196                eat -= skb_shinfo(skb)->frags[i].size;
1197        }
1198
 1199        /* If we need to update the frag list, we are in trouble.
 1200         * Certainly, it is possible to add an offset to the skb data,
 1201         * but taking into account that pulling is expected to
 1202         * be a very rare operation, it is worth fighting against
 1203         * further bloating of the skb head and crucifying ourselves here.
 1204         * Pure masochism, indeed. 8)8)
 1205         */
1206        if (eat) {
1207                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1208                struct sk_buff *clone = NULL;
1209                struct sk_buff *insp = NULL;
1210
1211                do {
1212                        BUG_ON(!list);
1213
1214                        if (list->len <= eat) {
1215                                /* Eaten as whole. */
1216                                eat -= list->len;
1217                                list = list->next;
1218                                insp = list;
1219                        } else {
1220                                /* Eaten partially. */
1221
1222                                if (skb_shared(list)) {
 1223                                        /* Sucks! We need to fork the list. :-( */
1224                                        clone = skb_clone(list, GFP_ATOMIC);
1225                                        if (!clone)
1226                                                return NULL;
1227                                        insp = list->next;
1228                                        list = clone;
1229                                } else {
1230                                        /* This may be pulled without
1231                                         * problems. */
1232                                        insp = list;
1233                                }
1234                                if (!pskb_pull(list, eat)) {
1235                                        kfree_skb(clone);
1236                                        return NULL;
1237                                }
1238                                break;
1239                        }
1240                } while (eat);
1241
1242                /* Free pulled out fragments. */
1243                while ((list = skb_shinfo(skb)->frag_list) != insp) {
1244                        skb_shinfo(skb)->frag_list = list->next;
1245                        kfree_skb(list);
1246                }
1247                /* And insert new clone at head. */
1248                if (clone) {
1249                        clone->next = list;
1250                        skb_shinfo(skb)->frag_list = clone;
1251                }
1252        }
1253        /* Success! Now we may commit changes to skb data. */
1254
1255pull_pages:
1256        eat = delta;
1257        k = 0;
1258        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1259                if (skb_shinfo(skb)->frags[i].size <= eat) {
1260                        put_page(skb_shinfo(skb)->frags[i].page);
1261                        eat -= skb_shinfo(skb)->frags[i].size;
1262                } else {
1263                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1264                        if (eat) {
1265                                skb_shinfo(skb)->frags[k].page_offset += eat;
1266                                skb_shinfo(skb)->frags[k].size -= eat;
1267                                eat = 0;
1268                        }
1269                        k++;
1270                }
1271        }
1272        skb_shinfo(skb)->nr_frags = k;
1273
1274        skb->tail     += delta;
1275        skb->data_len -= delta;
1276
1277        return skb_tail_pointer(skb);
1278}
1279EXPORT_SYMBOL(__pskb_pull_tail);
1280
1281/* Copy some data bits from skb to kernel buffer. */
1282
1283int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1284{
1285        int i, copy;
1286        int start = skb_headlen(skb);
1287
1288        if (offset > (int)skb->len - len)
1289                goto fault;
1290
1291        /* Copy header. */
1292        if ((copy = start - offset) > 0) {
1293                if (copy > len)
1294                        copy = len;
1295                skb_copy_from_linear_data_offset(skb, offset, to, copy);
1296                if ((len -= copy) == 0)
1297                        return 0;
1298                offset += copy;
1299                to     += copy;
1300        }
1301
1302        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1303                int end;
1304
1305                WARN_ON(start > offset + len);
1306
1307                end = start + skb_shinfo(skb)->frags[i].size;
1308                if ((copy = end - offset) > 0) {
1309                        u8 *vaddr;
1310
1311                        if (copy > len)
1312                                copy = len;
1313
1314                        vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1315                        memcpy(to,
1316                               vaddr + skb_shinfo(skb)->frags[i].page_offset+
1317                               offset - start, copy);
1318                        kunmap_skb_frag(vaddr);
1319
1320                        if ((len -= copy) == 0)
1321                                return 0;
1322                        offset += copy;
1323                        to     += copy;
1324                }
1325                start = end;
1326        }
1327
1328        if (skb_shinfo(skb)->frag_list) {
1329                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1330
1331                for (; list; list = list->next) {
1332                        int end;
1333
1334                        WARN_ON(start > offset + len);
1335
1336                        end = start + list->len;
1337                        if ((copy = end - offset) > 0) {
1338                                if (copy > len)
1339                                        copy = len;
1340                                if (skb_copy_bits(list, offset - start,
1341                                                  to, copy))
1342                                        goto fault;
1343                                if ((len -= copy) == 0)
1344                                        return 0;
1345                                offset += copy;
1346                                to     += copy;
1347                        }
1348                        start = end;
1349                }
1350        }
1351        if (!len)
1352                return 0;
1353
1354fault:
1355        return -EFAULT;
1356}
1357EXPORT_SYMBOL(skb_copy_bits);
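
/*
 * Illustrative sketch, not part of this file: extracting a hypothetical
 * 8-byte field at offset 16 from a possibly non-linear skb; the helper
 * walks the linear head, the page frags and the frag list as needed.
 */
static int example_peek_field(const struct sk_buff *skb, void *buf)
{
	return skb_copy_bits(skb, 16, buf, 8);
}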
1358
1359/*
1360 * Callback from splice_to_pipe(), if we need to release some pages
 1361 * at the end of the spd in case we errored out while filling the pipe.
1362 */
1363static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1364{
1365        put_page(spd->pages[i]);
1366}
1367
1368static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1369                                          unsigned int *offset,
1370                                          struct sk_buff *skb, struct sock *sk)
1371{
1372        struct page *p = sk->sk_sndmsg_page;
1373        unsigned int off;
1374
1375        if (!p) {
1376new_page:
1377                p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1378                if (!p)
1379                        return NULL;
1380
1381                off = sk->sk_sndmsg_off = 0;
1382                /* hold one ref to this page until it's full */
1383        } else {
1384                unsigned int mlen;
1385
1386                off = sk->sk_sndmsg_off;
1387                mlen = PAGE_SIZE - off;
1388                if (mlen < 64 && mlen < *len) {
1389                        put_page(p);
1390                        goto new_page;
1391                }
1392
1393                *len = min_t(unsigned int, *len, mlen);
1394        }
1395
1396        memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1397        sk->sk_sndmsg_off += *len;
1398        *offset = off;
1399        get_page(p);
1400
1401        return p;
1402}
1403
1404/*
1405 * Fill page/offset/length into spd, if it can hold more pages.
1406 */
1407static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1408                                unsigned int *len, unsigned int offset,
1409                                struct sk_buff *skb, int linear,
1410                                struct sock *sk)
1411{
1412        if (unlikely(spd->nr_pages == PIPE_BUFFERS))
1413                return 1;
1414
1415        if (linear) {
1416                page = linear_to_page(page, len, &offset, skb, sk);
1417                if (!page)
1418                        return 1;
1419        } else
1420                get_page(page);
1421
1422        spd->pages[spd->nr_pages] = page;
1423        spd->partial[spd->nr_pages].len = *len;
1424        spd->partial[spd->nr_pages].offset = offset;
1425        spd->nr_pages++;
1426
1427        return 0;
1428}
1429
1430static inline void __segment_seek(struct page **page, unsigned int *poff,
1431                                  unsigned int *plen, unsigned int off)
1432{
1433        unsigned long n;
1434
1435        *poff += off;
1436        n = *poff / PAGE_SIZE;
1437        if (n)
1438                *page = nth_page(*page, n);
1439
1440        *poff = *poff % PAGE_SIZE;
1441        *plen -= off;
1442}
1443
1444static inline int __splice_segment(struct page *page, unsigned int poff,
1445                                   unsigned int plen, unsigned int *off,
1446                                   unsigned int *len, struct sk_buff *skb,
1447                                   struct splice_pipe_desc *spd, int linear,
1448                                   struct sock *sk)
1449{
1450        if (!*len)
1451                return 1;
1452
1453        /* skip this segment if already processed */
1454        if (*off >= plen) {
1455                *off -= plen;
1456                return 0;
1457        }
1458
1459        /* ignore any bits we already processed */
1460        if (*off) {
1461                __segment_seek(&page, &poff, &plen, *off);
1462                *off = 0;
1463        }
1464
1465        do {
1466                unsigned int flen = min(*len, plen);
1467
1468                /* the linear region may spread across several pages  */
1469                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1470
1471                if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
1472                        return 1;
1473
1474                __segment_seek(&page, &poff, &plen, flen);
1475                *len -= flen;
1476
1477        } while (*len && plen);
1478
1479        return 0;
1480}
1481
1482/*
1483 * Map linear and fragment data from the skb to spd. It reports failure if the
1484 * pipe is full or if we already spliced the requested length.
1485 */
1486static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1487                             unsigned int *len, struct splice_pipe_desc *spd,
1488                             struct sock *sk)
1489{
1490        int seg;
1491
1492        /*
1493         * map the linear part
1494         */
1495        if (__splice_segment(virt_to_page(skb->data),
1496                             (unsigned long) skb->data & (PAGE_SIZE - 1),
1497                             skb_headlen(skb),
1498                             offset, len, skb, spd, 1, sk))
1499                return 1;
1500
1501        /*
1502         * then map the fragments
1503         */
1504        for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1505                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1506
1507                if (__splice_segment(f->page, f->page_offset, f->size,
1508                                     offset, len, skb, spd, 0, sk))
1509                        return 1;
1510        }
1511
1512        return 0;
1513}
1514
1515/*
1516 * Map data from the skb to a pipe. Should handle both the linear part,
1517 * the fragments, and the frag list. It does NOT handle frag lists within
1518 * the frag list, if such a thing exists. We'd probably need to recurse to
1519 * handle that cleanly.
1520 */
1521int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1522                    struct pipe_inode_info *pipe, unsigned int tlen,
1523                    unsigned int flags)
1524{
1525        struct partial_page partial[PIPE_BUFFERS];
1526        struct page *pages[PIPE_BUFFERS];
1527        struct splice_pipe_desc spd = {
1528                .pages = pages,
1529                .partial = partial,
1530                .flags = flags,
1531                .ops = &sock_pipe_buf_ops,
1532                .spd_release = sock_spd_release,
1533        };
1534        struct sock *sk = skb->sk;
1535
1536        /*
1537         * __skb_splice_bits() only fails if the output has no room left,
1538         * so no point in going over the frag_list for the error case.
1539         */
1540        if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
1541                goto done;
1542        else if (!tlen)
1543                goto done;
1544
1545        /*
1546         * now see if we have a frag_list to map
1547         */
1548        if (skb_shinfo(skb)->frag_list) {
1549                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1550
1551                for (; list && tlen; list = list->next) {
1552                        if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
1553                                break;
1554                }
1555        }
1556
1557done:
1558        if (spd.nr_pages) {
1559                int ret;
1560
1561                /*
1562                 * Drop the socket lock, otherwise we have reverse
1563                 * locking dependencies between sk_lock and i_mutex
1564                 * here as compared to sendfile(). We enter here
1565                 * with the socket lock held, and splice_to_pipe() will
1566                 * grab the pipe inode lock. For sendfile() emulation,
1567                 * we call into ->sendpage() with the i_mutex lock held
1568                 * and networking will grab the socket lock.
1569                 */
1570                release_sock(sk);
1571                ret = splice_to_pipe(pipe, &spd);
1572                lock_sock(sk);
1573                return ret;
1574        }
1575
1576        return 0;
1577}
1578
1579/**
1580 *      skb_store_bits - store bits from kernel buffer to skb
1581 *      @skb: destination buffer
1582 *      @offset: offset in destination
1583 *      @from: source buffer
1584 *      @len: number of bytes to copy
1585 *
1586 *      Copy the specified number of bytes from the source buffer to the
1587 *      destination skb.  This function handles all the messy bits of
1588 *      traversing fragment lists and such.
1589 */
1590
1591int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1592{
1593        int i, copy;
1594        int start = skb_headlen(skb);
1595
1596        if (offset > (int)skb->len - len)
1597                goto fault;
1598
1599        if ((copy = start - offset) > 0) {
1600                if (copy > len)
1601                        copy = len;
1602                skb_copy_to_linear_data_offset(skb, offset, from, copy);
1603                if ((len -= copy) == 0)
1604                        return 0;
1605                offset += copy;
1606                from += copy;
1607        }
1608
1609        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1610                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1611                int end;
1612
1613                WARN_ON(start > offset + len);
1614
1615                end = start + frag->size;
1616                if ((copy = end - offset) > 0) {
1617                        u8 *vaddr;
1618
1619                        if (copy > len)
1620                                copy = len;
1621
1622                        vaddr = kmap_skb_frag(frag);
1623                        memcpy(vaddr + frag->page_offset + offset - start,
1624                               from, copy);
1625                        kunmap_skb_frag(vaddr);
1626
1627                        if ((len -= copy) == 0)
1628                                return 0;
1629                        offset += copy;
1630                        from += copy;
1631                }
1632                start = end;
1633        }
1634
1635        if (skb_shinfo(skb)->frag_list) {
1636                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1637
1638                for (; list; list = list->next) {
1639                        int end;
1640
1641                        WARN_ON(start > offset + len);
1642
1643                        end = start + list->len;
1644                        if ((copy = end - offset) > 0) {
1645                                if (copy > len)
1646                                        copy = len;
1647                                if (skb_store_bits(list, offset - start,
1648                                                   from, copy))
1649                                        goto fault;
1650                                if ((len -= copy) == 0)
1651                                        return 0;
1652                                offset += copy;
1653                                from += copy;
1654                        }
1655                        start = end;
1656                }
1657        }
1658        if (!len)
1659                return 0;
1660
1661fault:
1662        return -EFAULT;
1663}
1664EXPORT_SYMBOL(skb_store_bits);
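
/*
 * Minimal usage sketch (the offset and marker bytes are hypothetical):
 * overwrite four bytes at a fixed offset in a possibly non-linear skb.
 * The caller must ensure the data area is writable, e.g. by checking
 * skb_cloned() and expanding with pskb_expand_head() first.
 */
static int example_store_marker(struct sk_buff *skb)
{
        static const u8 marker[4] = { 0xde, 0xad, 0xbe, 0xef };

        if (skb->len < 16 + sizeof(marker))
                return -EINVAL;

        /* Traverses the linear area, the page frags and the frag_list. */
        return skb_store_bits(skb, 16, marker, sizeof(marker));
}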
1665
1666/* Checksum skb data. */
1667
1668__wsum skb_checksum(const struct sk_buff *skb, int offset,
1669                          int len, __wsum csum)
1670{
1671        int start = skb_headlen(skb);
1672        int i, copy = start - offset;
1673        int pos = 0;
1674
1675        /* Checksum header. */
1676        if (copy > 0) {
1677                if (copy > len)
1678                        copy = len;
1679                csum = csum_partial(skb->data + offset, copy, csum);
1680                if ((len -= copy) == 0)
1681                        return csum;
1682                offset += copy;
1683                pos     = copy;
1684        }
1685
1686        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1687                int end;
1688
1689                WARN_ON(start > offset + len);
1690
1691                end = start + skb_shinfo(skb)->frags[i].size;
1692                if ((copy = end - offset) > 0) {
1693                        __wsum csum2;
1694                        u8 *vaddr;
1695                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1696
1697                        if (copy > len)
1698                                copy = len;
1699                        vaddr = kmap_skb_frag(frag);
1700                        csum2 = csum_partial(vaddr + frag->page_offset +
1701                                             offset - start, copy, 0);
1702                        kunmap_skb_frag(vaddr);
1703                        csum = csum_block_add(csum, csum2, pos);
1704                        if (!(len -= copy))
1705                                return csum;
1706                        offset += copy;
1707                        pos    += copy;
1708                }
1709                start = end;
1710        }
1711
1712        if (skb_shinfo(skb)->frag_list) {
1713                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1714
1715                for (; list; list = list->next) {
1716                        int end;
1717
1718                        WARN_ON(start > offset + len);
1719
1720                        end = start + list->len;
1721                        if ((copy = end - offset) > 0) {
1722                                __wsum csum2;
1723                                if (copy > len)
1724                                        copy = len;
1725                                csum2 = skb_checksum(list, offset - start,
1726                                                     copy, 0);
1727                                csum = csum_block_add(csum, csum2, pos);
1728                                if ((len -= copy) == 0)
1729                                        return csum;
1730                                offset += copy;
1731                                pos    += copy;
1732                        }
1733                        start = end;
1734                }
1735        }
1736        BUG_ON(len);
1737
1738        return csum;
1739}
1740EXPORT_SYMBOL(skb_checksum);
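
/*
 * Minimal sketch (the 20 byte header length is a hypothetical stand-in
 * for a real protocol header): checksum everything after the header
 * and fold the 32-bit partial sum into a 16-bit Internet checksum.
 */
static __sum16 example_payload_csum(const struct sk_buff *skb)
{
        __wsum csum;

        if (skb->len <= 20)
                return 0;

        /* Walks the head, the page frags and the frag_list. */
        csum = skb_checksum(skb, 20, skb->len - 20, 0);
        return csum_fold(csum);
}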
1741
1742/* Both of above in one bottle. */
1743
1744__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1745                                    u8 *to, int len, __wsum csum)
1746{
1747        int start = skb_headlen(skb);
1748        int i, copy = start - offset;
1749        int pos = 0;
1750
1751        /* Copy header. */
1752        if (copy > 0) {
1753                if (copy > len)
1754                        copy = len;
1755                csum = csum_partial_copy_nocheck(skb->data + offset, to,
1756                                                 copy, csum);
1757                if ((len -= copy) == 0)
1758                        return csum;
1759                offset += copy;
1760                to     += copy;
1761                pos     = copy;
1762        }
1763
1764        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1765                int end;
1766
1767                WARN_ON(start > offset + len);
1768
1769                end = start + skb_shinfo(skb)->frags[i].size;
1770                if ((copy = end - offset) > 0) {
1771                        __wsum csum2;
1772                        u8 *vaddr;
1773                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1774
1775                        if (copy > len)
1776                                copy = len;
1777                        vaddr = kmap_skb_frag(frag);
1778                        csum2 = csum_partial_copy_nocheck(vaddr +
1779                                                          frag->page_offset +
1780                                                          offset - start, to,
1781                                                          copy, 0);
1782                        kunmap_skb_frag(vaddr);
1783                        csum = csum_block_add(csum, csum2, pos);
1784                        if (!(len -= copy))
1785                                return csum;
1786                        offset += copy;
1787                        to     += copy;
1788                        pos    += copy;
1789                }
1790                start = end;
1791        }
1792
1793        if (skb_shinfo(skb)->frag_list) {
1794                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1795
1796                for (; list; list = list->next) {
1797                        __wsum csum2;
1798                        int end;
1799
1800                        WARN_ON(start > offset + len);
1801
1802                        end = start + list->len;
1803                        if ((copy = end - offset) > 0) {
1804                                if (copy > len)
1805                                        copy = len;
1806                                csum2 = skb_copy_and_csum_bits(list,
1807                                                               offset - start,
1808                                                               to, copy, 0);
1809                                csum = csum_block_add(csum, csum2, pos);
1810                                if ((len -= copy) == 0)
1811                                        return csum;
1812                                offset += copy;
1813                                to     += copy;
1814                                pos    += copy;
1815                        }
1816                        start = end;
1817                }
1818        }
1819        BUG_ON(len);
1820        return csum;
1821}
1822EXPORT_SYMBOL(skb_copy_and_csum_bits);
1823
1824void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1825{
1826        __wsum csum;
1827        long csstart;
1828
1829        if (skb->ip_summed == CHECKSUM_PARTIAL)
1830                csstart = skb->csum_start - skb_headroom(skb);
1831        else
1832                csstart = skb_headlen(skb);
1833
1834        BUG_ON(csstart > skb_headlen(skb));
1835
1836        skb_copy_from_linear_data(skb, to, csstart);
1837
1838        csum = 0;
1839        if (csstart != skb->len)
1840                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1841                                              skb->len - csstart, 0);
1842
1843        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1844                long csstuff = csstart + skb->csum_offset;
1845
1846                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
1847        }
1848}
1849EXPORT_SYMBOL(skb_copy_and_csum_dev);
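
/*
 * Driver-side sketch (the destination buffer is hypothetical): copy an
 * outgoing packet into a device-owned linear buffer; if the skb came
 * down with CHECKSUM_PARTIAL, the checksum is computed and written at
 * csum_start/csum_offset in the copy.
 */
static void example_tx_copy(struct sk_buff *skb, u8 *dev_buf)
{
        /* dev_buf must provide room for skb->len bytes. */
        skb_copy_and_csum_dev(skb, dev_buf);
}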
1850
1851/**
1852 *      skb_dequeue - remove from the head of the queue
1853 *      @list: list to dequeue from
1854 *
1855 *      Remove the head of the list. The list lock is taken so the function
1856 *      may be used safely with other locking list functions. The head item is
1857 *      returned or %NULL if the list is empty.
1858 */
1859
1860struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1861{
1862        unsigned long flags;
1863        struct sk_buff *result;
1864
1865        spin_lock_irqsave(&list->lock, flags);
1866        result = __skb_dequeue(list);
1867        spin_unlock_irqrestore(&list->lock, flags);
1868        return result;
1869}
1870EXPORT_SYMBOL(skb_dequeue);
1871
1872/**
1873 *      skb_dequeue_tail - remove from the tail of the queue
1874 *      @list: list to dequeue from
1875 *
1876 *      Remove the tail of the list. The list lock is taken so the function
1877 *      may be used safely with other locking list functions. The tail item is
1878 *      returned or %NULL if the list is empty.
1879 */
1880struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1881{
1882        unsigned long flags;
1883        struct sk_buff *result;
1884
1885        spin_lock_irqsave(&list->lock, flags);
1886        result = __skb_dequeue_tail(list);
1887        spin_unlock_irqrestore(&list->lock, flags);
1888        return result;
1889}
1890EXPORT_SYMBOL(skb_dequeue_tail);
1891
1892/**
1893 *      skb_queue_purge - empty a list
1894 *      @list: list to empty
1895 *
1896 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1897 *      the list and one reference dropped. This function takes the list
1898 *      lock and is atomic with respect to other list locking functions.
1899 */
1900void skb_queue_purge(struct sk_buff_head *list)
1901{
1902        struct sk_buff *skb;
1903        while ((skb = skb_dequeue(list)) != NULL)
1904                kfree_skb(skb);
1905}
1906EXPORT_SYMBOL(skb_queue_purge);
1907
1908/**
1909 *      skb_queue_head - queue a buffer at the list head
1910 *      @list: list to use
1911 *      @newsk: buffer to queue
1912 *
1913 *      Queue a buffer at the start of the list. This function takes the
1914 *      list lock and can be used safely with other locking &sk_buff
1915 *      functions.
1916 *
1917 *      A buffer cannot be placed on two lists at the same time.
1918 */
1919void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1920{
1921        unsigned long flags;
1922
1923        spin_lock_irqsave(&list->lock, flags);
1924        __skb_queue_head(list, newsk);
1925        spin_unlock_irqrestore(&list->lock, flags);
1926}
1927EXPORT_SYMBOL(skb_queue_head);
1928
1929/**
1930 *      skb_queue_tail - queue a buffer at the list tail
1931 *      @list: list to use
1932 *      @newsk: buffer to queue
1933 *
1934 *      Queue a buffer at the tail of the list. This function takes the
1935 *      list lock and can be used safely with other locking &sk_buff
1936 *      functions.
1937 *
1938 *      A buffer cannot be placed on two lists at the same time.
1939 */
1940void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1941{
1942        unsigned long flags;
1943
1944        spin_lock_irqsave(&list->lock, flags);
1945        __skb_queue_tail(list, newsk);
1946        spin_unlock_irqrestore(&list->lock, flags);
1947}
1948EXPORT_SYMBOL(skb_queue_tail);
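
/*
 * Sketch of the locked-queue API as a whole (the queue and helper names
 * are hypothetical): skb_queue_tail()/skb_dequeue() pair up as producer
 * and consumer, and skb_queue_purge() empties everything on teardown.
 * skb_queue_head_init() must run on the queue before first use.
 */
static struct sk_buff_head example_queue;

static void example_enqueue(struct sk_buff *skb)
{
        skb_queue_tail(&example_queue, skb);    /* takes list->lock */
}

static struct sk_buff *example_next(void)
{
        return skb_dequeue(&example_queue);     /* NULL when empty */
}

static void example_teardown(void)
{
        skb_queue_purge(&example_queue);
}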
1949
1950/**
1951 *      skb_unlink      -       remove a buffer from a list
1952 *      @skb: buffer to remove
1953 *      @list: list to use
1954 *
1955 *      Remove a packet from a list. The list locks are taken and this
1956 *      function is atomic with respect to other list locked calls.
1957 *
1958 *      You must know what list the SKB is on.
1959 */
1960void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1961{
1962        unsigned long flags;
1963
1964        spin_lock_irqsave(&list->lock, flags);
1965        __skb_unlink(skb, list);
1966        spin_unlock_irqrestore(&list->lock, flags);
1967}
1968EXPORT_SYMBOL(skb_unlink);
1969
1970/**
1971 *      skb_append      -       append a buffer
1972 *      @old: buffer to insert after
1973 *      @newsk: buffer to insert
1974 *      @list: list to use
1975 *
1976 *      Place a packet after a given packet in a list. The list locks are taken
1977 *      and this function is atomic with respect to other list locked calls.
1978 *      A buffer cannot be placed on two lists at the same time.
1979 */
1980void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1981{
1982        unsigned long flags;
1983
1984        spin_lock_irqsave(&list->lock, flags);
1985        __skb_queue_after(list, old, newsk);
1986        spin_unlock_irqrestore(&list->lock, flags);
1987}
1988EXPORT_SYMBOL(skb_append);
1989
1990/**
1991 *      skb_insert      -       insert a buffer
1992 *      @old: buffer to insert before
1993 *      @newsk: buffer to insert
1994 *      @list: list to use
1995 *
1996 *      Place a packet before a given packet in a list. The list locks are
1997 *      taken and this function is atomic with respect to other list locked
1998 *      calls.
1999 *
2000 *      A buffer cannot be placed on two lists at the same time.
2001 */
2002void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2003{
2004        unsigned long flags;
2005
2006        spin_lock_irqsave(&list->lock, flags);
2007        __skb_insert(newsk, old->prev, old, list);
2008        spin_unlock_irqrestore(&list->lock, flags);
2009}
2010EXPORT_SYMBOL(skb_insert);
2011
2012static inline void skb_split_inside_header(struct sk_buff *skb,
2013                                           struct sk_buff* skb1,
2014                                           const u32 len, const int pos)
2015{
2016        int i;
2017
2018        skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2019                                         pos - len);
2020        /* And move data appendix as is. */
2021        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2022                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2023
2024        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2025        skb_shinfo(skb)->nr_frags  = 0;
2026        skb1->data_len             = skb->data_len;
2027        skb1->len                  += skb1->data_len;
2028        skb->data_len              = 0;
2029        skb->len                   = len;
2030        skb_set_tail_pointer(skb, len);
2031}
2032
2033static inline void skb_split_no_header(struct sk_buff *skb,
2034                                       struct sk_buff* skb1,
2035                                       const u32 len, int pos)
2036{
2037        int i, k = 0;
2038        const int nfrags = skb_shinfo(skb)->nr_frags;
2039
2040        skb_shinfo(skb)->nr_frags = 0;
2041        skb1->len                 = skb1->data_len = skb->len - len;
2042        skb->len                  = len;
2043        skb->data_len             = len - pos;
2044
2045        for (i = 0; i < nfrags; i++) {
2046                int size = skb_shinfo(skb)->frags[i].size;
2047
2048                if (pos + size > len) {
2049                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2050
2051                        if (pos < len) {
2052                                /* Split frag.
2053                                 * We have two variants in this case:
2054                                 * 1. Move all the frag to the second
2055                                 *    part, if it is possible, e.g.
2056                                 *    this approach is mandatory for TUX,
2057                                 *    where splitting is expensive.
2058                                 * 2. Split accurately; that is done here.
2059                                 */
2060                                get_page(skb_shinfo(skb)->frags[i].page);
2061                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2062                                skb_shinfo(skb1)->frags[0].size -= len - pos;
2063                                skb_shinfo(skb)->frags[i].size  = len - pos;
2064                                skb_shinfo(skb)->nr_frags++;
2065                        }
2066                        k++;
2067                } else
2068                        skb_shinfo(skb)->nr_frags++;
2069                pos += size;
2070        }
2071        skb_shinfo(skb1)->nr_frags = k;
2072}
2073
2074/**
2075 * skb_split - split a fragmented skb into two parts at length @len
2076 * @skb: the buffer to split
2077 * @skb1: the buffer to receive the second part
2078 * @len: new length for skb
2079 */
2080void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2081{
2082        int pos = skb_headlen(skb);
2083
2084        if (len < pos)  /* Split line is inside header. */
2085                skb_split_inside_header(skb, skb1, len, pos);
2086        else            /* Second chunk has no header, nothing to copy. */
2087                skb_split_no_header(skb, skb1, len, pos);
2088}
2089EXPORT_SYMBOL(skb_split);
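
/*
 * Sketch (assumes @skb is not shared and @len < skb->len): split @skb
 * at @len, keeping the head in @skb and moving the tail into a fresh
 * buffer, roughly what TCP does when fragmenting a queued segment.
 */
static struct sk_buff *example_split_tail(struct sk_buff *skb, u32 len)
{
        struct sk_buff *tail;

        if (len >= skb->len)
                return NULL;

        /* Reserve linear space in case the split line falls inside the
         * header: skb_split() then copies pos - len bytes into tail. */
        tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
        if (!tail)
                return NULL;

        skb_split(skb, tail, len);
        return tail;
}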
2090
2091/* Shifting from/to a cloned skb is a no-go.
2092 *
2093 * Caller cannot keep skb_shinfo related pointers past calling here!
2094 */
2095static int skb_prepare_for_shift(struct sk_buff *skb)
2096{
2097        return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2098}
2099
2100/**
2101 * skb_shift - Shifts paged data partially from skb to another
2102 * @tgt: buffer into which tail data gets added
2103 * @skb: buffer from which the paged data comes from
2104 * @shiftlen: shift up to this many bytes
2105 *
2106 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2107 * the length of the skb, from @skb to @tgt. Returns the number of bytes
2108 * shifted. It's up to the caller to free @skb if everything was shifted.
2109 *
2110 * If @tgt runs out of frags, the whole operation is aborted.
2111 *
2112 * @skb may contain nothing but paged data, while @tgt is allowed
2113 * to carry non-paged (linear) data as well.
2114 *
2115 * TODO: full sized shift could be optimized but that would need
2116 * specialized skb free'er to handle frags without up-to-date nr_frags.
2117 */
2118int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2119{
2120        int from, to, merge, todo;
2121        struct skb_frag_struct *fragfrom, *fragto;
2122
2123        BUG_ON(shiftlen > skb->len);
2124        BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
2125
2126        todo = shiftlen;
2127        from = 0;
2128        to = skb_shinfo(tgt)->nr_frags;
2129        fragfrom = &skb_shinfo(skb)->frags[from];
2130
2131        /* Actual merge is delayed until the point when we know we can
2132         * commit all, so that we don't have to undo partial changes
2133         */
2134        if (!to ||
2135            !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2136                merge = -1;
2137        } else {
2138                merge = to - 1;
2139
2140                todo -= fragfrom->size;
2141                if (todo < 0) {
2142                        if (skb_prepare_for_shift(skb) ||
2143                            skb_prepare_for_shift(tgt))
2144                                return 0;
2145
2146                        /* All previous frag pointers might be stale! */
2147                        fragfrom = &skb_shinfo(skb)->frags[from];
2148                        fragto = &skb_shinfo(tgt)->frags[merge];
2149
2150                        fragto->size += shiftlen;
2151                        fragfrom->size -= shiftlen;
2152                        fragfrom->page_offset += shiftlen;
2153
2154                        goto onlymerged;
2155                }
2156
2157                from++;
2158        }
2159
2160        /* Skip full, not-fitting skb to avoid expensive operations */
2161        if ((shiftlen == skb->len) &&
2162            (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2163                return 0;
2164
2165        if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2166                return 0;
2167
2168        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2169                if (to == MAX_SKB_FRAGS)
2170                        return 0;
2171
2172                fragfrom = &skb_shinfo(skb)->frags[from];
2173                fragto = &skb_shinfo(tgt)->frags[to];
2174
2175                if (todo >= fragfrom->size) {
2176                        *fragto = *fragfrom;
2177                        todo -= fragfrom->size;
2178                        from++;
2179                        to++;
2180
2181                } else {
2182                        get_page(fragfrom->page);
2183                        fragto->page = fragfrom->page;
2184                        fragto->page_offset = fragfrom->page_offset;
2185                        fragto->size = todo;
2186
2187                        fragfrom->page_offset += todo;
2188                        fragfrom->size -= todo;
2189                        todo = 0;
2190
2191                        to++;
2192                        break;
2193                }
2194        }
2195
2196        /* Ready to "commit" this state change to tgt */
2197        skb_shinfo(tgt)->nr_frags = to;
2198
2199        if (merge >= 0) {
2200                fragfrom = &skb_shinfo(skb)->frags[0];
2201                fragto = &skb_shinfo(tgt)->frags[merge];
2202
2203                fragto->size += fragfrom->size;
2204                put_page(fragfrom->page);
2205        }
2206
2207        /* Reposition in the original skb */
2208        to = 0;
2209        while (from < skb_shinfo(skb)->nr_frags)
2210                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2211        skb_shinfo(skb)->nr_frags = to;
2212
2213        BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2214
2215onlymerged:
2216        /* Most likely the tgt won't ever need its checksum anymore; skb,
2217         * on the other hand, might need it if it has to be resent
2218         */
2219        tgt->ip_summed = CHECKSUM_PARTIAL;
2220        skb->ip_summed = CHECKSUM_PARTIAL;
2221
2222        /* Yak, is it really working this way? Some helper please? */
2223        skb->len -= shiftlen;
2224        skb->data_len -= shiftlen;
2225        skb->truesize -= shiftlen;
2226        tgt->len += shiftlen;
2227        tgt->data_len += shiftlen;
2228        tgt->truesize += shiftlen;
2229
2230        return shiftlen;
2231}
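
/*
 * Caller sketch (queue unlinking is elided): try to merge one
 * purely-paged skb into its predecessor, the way TCP retransmit-queue
 * collapsing uses skb_shift(). @skb must have no linear data, or
 * skb_shift() hits its BUG_ON.
 */
static void example_collapse(struct sk_buff *prev, struct sk_buff *skb)
{
        unsigned int len = skb->len;

        if (skb_shift(prev, skb, len) == len)
                kfree_skb(skb);         /* everything moved; free source */
}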
2232
2233/**
2234 * skb_prepare_seq_read - Prepare a sequential read of skb data
2235 * @skb: the buffer to read
2236 * @from: lower offset of data to be read
2237 * @to: upper offset of data to be read
2238 * @st: state variable
2239 *
2240 * Initializes the specified state variable. Must be called before
2241 * invoking skb_seq_read() for the first time.
2242 */
2243void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2244                          unsigned int to, struct skb_seq_state *st)
2245{
2246        st->lower_offset = from;
2247        st->upper_offset = to;
2248        st->root_skb = st->cur_skb = skb;
2249        st->frag_idx = st->stepped_offset = 0;
2250        st->frag_data = NULL;
2251}
2252EXPORT_SYMBOL(skb_prepare_seq_read);
2253
2254/**
2255 * skb_seq_read - Sequentially read skb data
2256 * @consumed: number of bytes consumed by the caller so far
2257 * @data: destination pointer for data to be returned
2258 * @st: state variable
2259 *
2260 * Reads a block of skb data at &consumed relative to the
2261 * lower offset specified to skb_prepare_seq_read(). Assigns
2262 * the head of the data block to &data and returns the length
2263 * of the block or 0 if the end of the skb data or the upper
2264 * offset has been reached.
2265 *
2266 * The caller is not required to consume all of the data
2267 * returned, i.e. &consumed is typically set to the number
2268 * of bytes already consumed and the next call to
2269 * skb_seq_read() will return the remaining part of the block.
2270 *
2271 * Note 1: The size of each block of data returned can be arbitrary;
2272 *       this limitation is the cost of zerocopy sequential
2273 *       reads of potentially non-linear data.
2274 *
2275 * Note 2: Fragment lists within fragments are not implemented
2276 *       at the moment, state->root_skb could be replaced with
2277 *       a stack for this purpose.
2278 */
2279unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2280                          struct skb_seq_state *st)
2281{
2282        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2283        skb_frag_t *frag;
2284
2285        if (unlikely(abs_offset >= st->upper_offset))
2286                return 0;
2287
2288next_skb:
2289        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2290
2291        if (abs_offset < block_limit && !st->frag_data) {
2292                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2293                return block_limit - abs_offset;
2294        }
2295
2296        if (st->frag_idx == 0 && !st->frag_data)
2297                st->stepped_offset += skb_headlen(st->cur_skb);
2298
2299        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2300                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2301                block_limit = frag->size + st->stepped_offset;
2302
2303                if (abs_offset < block_limit) {
2304                        if (!st->frag_data)
2305                                st->frag_data = kmap_skb_frag(frag);
2306
2307                        *data = (u8 *) st->frag_data + frag->page_offset +
2308                                (abs_offset - st->stepped_offset);
2309
2310                        return block_limit - abs_offset;
2311                }
2312
2313                if (st->frag_data) {
2314                        kunmap_skb_frag(st->frag_data);
2315                        st->frag_data = NULL;
2316                }
2317
2318                st->frag_idx++;
2319                st->stepped_offset += frag->size;
2320        }
2321
2322        if (st->frag_data) {
2323                kunmap_skb_frag(st->frag_data);
2324                st->frag_data = NULL;
2325        }
2326
2327        if (st->root_skb == st->cur_skb &&
2328            skb_shinfo(st->root_skb)->frag_list) {
2329                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2330                st->frag_idx = 0;
2331                goto next_skb;
2332        } else if (st->cur_skb->next) {
2333                st->cur_skb = st->cur_skb->next;
2334                st->frag_idx = 0;
2335                goto next_skb;
2336        }
2337
2338        return 0;
2339}
2340EXPORT_SYMBOL(skb_seq_read);
2341
2342/**
2343 * skb_abort_seq_read - Abort a sequential read of skb data
2344 * @st: state variable
2345 *
2346 * Must be called if the sequential read is abandoned before
2347 * skb_seq_read() has returned 0.
2348 */
2349void skb_abort_seq_read(struct skb_seq_state *st)
2350{
2351        if (st->frag_data)
2352                kunmap_skb_frag(st->frag_data);
2353}
2354EXPORT_SYMBOL(skb_abort_seq_read);
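
/*
 * Sketch of the full sequential-read protocol: walk every byte of a
 * possibly non-linear skb without copying. A real user would parse the
 * returned blocks instead of just counting them.
 */
static unsigned int example_seq_walk(struct sk_buff *skb)
{
        struct skb_seq_state st;
        unsigned int consumed = 0, avail;
        const u8 *data;

        skb_prepare_seq_read(skb, 0, skb->len, &st);
        while ((avail = skb_seq_read(consumed, &data, &st)) != 0)
                consumed += avail;      /* consume each block in full */
        /* skb_seq_read() returned 0, so no skb_abort_seq_read() needed */
        return consumed;
}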
2355
2356#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))
2357
2358static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2359                                          struct ts_config *conf,
2360                                          struct ts_state *state)
2361{
2362        return skb_seq_read(offset, text, TS_SKB_CB(state));
2363}
2364
2365static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2366{
2367        skb_abort_seq_read(TS_SKB_CB(state));
2368}
2369
2370/**
2371 * skb_find_text - Find a text pattern in skb data
2372 * @skb: the buffer to look in
2373 * @from: search offset
2374 * @to: search limit
2375 * @config: textsearch configuration
2376 * @state: uninitialized textsearch state variable
2377 *
2378 * Finds a pattern in the skb data according to the specified
2379 * textsearch configuration. Use textsearch_next() to retrieve
2380 * subsequent occurrences of the pattern. Returns the offset
2381 * to the first occurrence or UINT_MAX if no match was found.
2382 */
2383unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2384                           unsigned int to, struct ts_config *config,
2385                           struct ts_state *state)
2386{
2387        unsigned int ret;
2388
2389        config->get_next_block = skb_ts_get_next_block;
2390        config->finish = skb_ts_finish;
2391
2392        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2393
2394        ret = textsearch_find(config, state);
2395        return (ret <= to - from ? ret : UINT_MAX);
2396}
2397EXPORT_SYMBOL(skb_find_text);
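
/*
 * Sketch (pattern and algorithm are arbitrary choices, and
 * <linux/textsearch.h> is assumed): search a whole skb for a byte
 * pattern, the same pairing the netfilter string match uses.
 */
static unsigned int example_find(struct sk_buff *skb)
{
        static const char pattern[] = "HTTP";
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("kmp", pattern, sizeof(pattern) - 1,
                                  GFP_ATOMIC, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return UINT_MAX;

        pos = skb_find_text(skb, 0, skb->len, conf, &state);
        textsearch_destroy(conf);
        return pos;     /* UINT_MAX when not found */
}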
2398
2399/**
2400 * skb_append_datato_frags - append user data to an skb
2401 * @sk: sock structure
2402 * @skb: skb structure to which the user data is appended
2403 * @getfrag: callback function used to fetch the user data
2404 * @from: pointer to the user message iov
2405 * @length: length of the iov message
2406 *
2407 * Description: This procedure appends user data to the fragment part
2408 * of the skb. If any page allocation fails, it returns -ENOMEM.
2409 */
2410int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2411                        int (*getfrag)(void *from, char *to, int offset,
2412                                        int len, int odd, struct sk_buff *skb),
2413                        void *from, int length)
2414{
2415        int frg_cnt = 0;
2416        skb_frag_t *frag = NULL;
2417        struct page *page = NULL;
2418        int copy, left;
2419        int offset = 0;
2420        int ret;
2421
2422        do {
2423                /* Return error if we don't have space for new frag */
2424                frg_cnt = skb_shinfo(skb)->nr_frags;
2425                if (frg_cnt >= MAX_SKB_FRAGS)
2426                        return -EFAULT;
2427
2428                /* allocate a new page for next frag */
2429                page = alloc_pages(sk->sk_allocation, 0);
2430
2431                /* If alloc_pages fails, just return failure; the caller
2432                 * will free the previously allocated pages via kfree_skb()
2433                 */
2434                if (page == NULL)
2435                        return -ENOMEM;
2436
2437                /* initialize the next frag */
2438                sk->sk_sndmsg_page = page;
2439                sk->sk_sndmsg_off = 0;
2440                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2441                skb->truesize += PAGE_SIZE;
2442                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2443
2444                /* get the new initialized frag */
2445                frg_cnt = skb_shinfo(skb)->nr_frags;
2446                frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2447
2448                /* copy the user data to page */
2449                left = PAGE_SIZE - frag->page_offset;
2450                copy = (length > left)? left : length;
2451
2452                ret = getfrag(from, (page_address(frag->page) +
2453                            frag->page_offset + frag->size),
2454                            offset, copy, 0, skb);
2455                if (ret < 0)
2456                        return -EFAULT;
2457
2458                /* copy was successful so update the size parameters */
2459                sk->sk_sndmsg_off += copy;
2460                frag->size += copy;
2461                skb->len += copy;
2462                skb->data_len += copy;
2463                offset += copy;
2464                length -= copy;
2465
2466        } while (length > 0);
2467
2468        return 0;
2469}
2470EXPORT_SYMBOL(skb_append_datato_frags);
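
/*
 * Sketch (the memcpy-based getfrag is a hypothetical stand-in for the
 * usual iovec helpers such as ip_generic_getfrag): append a plain
 * kernel buffer to the paged part of an skb.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
                           int odd, struct sk_buff *skb)
{
        memcpy(to, (char *)from + offset, len);
        return 0;
}

static int example_append(struct sock *sk, struct sk_buff *skb,
                          void *buf, int len)
{
        return skb_append_datato_frags(sk, skb, example_getfrag, buf, len);
}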
2471
2472/**
2473 *      skb_pull_rcsum - pull skb and update receive checksum
2474 *      @skb: buffer to update
2475 *      @len: length of data pulled
2476 *
2477 *      This function performs an skb_pull on the packet and updates
2478 *      the CHECKSUM_COMPLETE checksum.  It should be used in
2479 *      receive path processing instead of skb_pull unless you know
2480 *      that the checksum difference is zero (e.g., a valid IP header)
2481 *      or you are setting ip_summed to CHECKSUM_NONE.
2482 */
2483unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2484{
2485        BUG_ON(len > skb->len);
2486        skb->len -= len;
2487        BUG_ON(skb->len < skb->data_len);
2488        skb_postpull_rcsum(skb, skb->data, len);
2489        return skb->data += len;
2490}
2491
2492EXPORT_SYMBOL_GPL(skb_pull_rcsum);
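
/*
 * Receive-path sketch (the 8 byte header length is hypothetical):
 * strip an encapsulation header while keeping a CHECKSUM_COMPLETE
 * value consistent, as tunnel demultiplexers do.
 */
static int example_decap(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, 8))
                return -EINVAL;

        skb_pull_rcsum(skb, 8); /* fixes up skb->csum as it pulls */
        return 0;
}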
2493
2494/**
2495 *      skb_segment - Perform protocol segmentation on skb.
2496 *      @skb: buffer to segment
2497 *      @features: features for the output path (see dev->features)
2498 *
2499 *      This function performs segmentation on the given skb.  It returns
2500 *      a pointer to the first in a list of new skbs for the segments.
2501 *      In case of error it returns ERR_PTR(err).
2502 */
2503struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2504{
2505        struct sk_buff *segs = NULL;
2506        struct sk_buff *tail = NULL;
2507        struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2508        unsigned int mss = skb_shinfo(skb)->gso_size;
2509        unsigned int doffset = skb->data - skb_mac_header(skb);
2510        unsigned int offset = doffset;
2511        unsigned int headroom;
2512        unsigned int len;
2513        int sg = features & NETIF_F_SG;
2514        int nfrags = skb_shinfo(skb)->nr_frags;
2515        int err = -ENOMEM;
2516        int i = 0;
2517        int pos;
2518
2519        __skb_push(skb, doffset);
2520        headroom = skb_headroom(skb);
2521        pos = skb_headlen(skb);
2522
2523        do {
2524                struct sk_buff *nskb;
2525                skb_frag_t *frag;
2526                int hsize;
2527                int size;
2528
2529                len = skb->len - offset;
2530                if (len > mss)
2531                        len = mss;
2532
2533                hsize = skb_headlen(skb) - offset;
2534                if (hsize < 0)
2535                        hsize = 0;
2536                if (hsize > len || !sg)
2537                        hsize = len;
2538
2539                if (!hsize && i >= nfrags) {
2540                        BUG_ON(fskb->len != len);
2541
2542                        pos += len;
2543                        nskb = skb_clone(fskb, GFP_ATOMIC);
2544                        fskb = fskb->next;
2545
2546                        if (unlikely(!nskb))
2547                                goto err;
2548
2549                        hsize = skb_end_pointer(nskb) - nskb->head;
2550                        if (skb_cow_head(nskb, doffset + headroom)) {
2551                                kfree_skb(nskb);
2552                                goto err;
2553                        }
2554
2555                        nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2556                                          hsize;
2557                        skb_release_head_state(nskb);
2558                        __skb_push(nskb, doffset);
2559                } else {
2560                        nskb = alloc_skb(hsize + doffset + headroom,
2561                                         GFP_ATOMIC);
2562
2563                        if (unlikely(!nskb))
2564                                goto err;
2565
2566                        skb_reserve(nskb, headroom);
2567                        __skb_put(nskb, doffset);
2568                }
2569
2570                if (segs)
2571                        tail->next = nskb;
2572                else
2573                        segs = nskb;
2574                tail = nskb;
2575
2576                __copy_skb_header(nskb, skb);
2577                nskb->mac_len = skb->mac_len;
2578
2579                skb_reset_mac_header(nskb);
2580                skb_set_network_header(nskb, skb->mac_len);
2581                nskb->transport_header = (nskb->network_header +
2582                                          skb_network_header_len(skb));
2583                skb_copy_from_linear_data(skb, nskb->data, doffset);
2584
2585                if (fskb != skb_shinfo(skb)->frag_list)
2586                        continue;
2587
2588                if (!sg) {
2589                        nskb->ip_summed = CHECKSUM_NONE;
2590                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
2591                                                            skb_put(nskb, len),
2592                                                            len, 0);
2593                        continue;
2594                }
2595
2596                frag = skb_shinfo(nskb)->frags;
2597
2598                skb_copy_from_linear_data_offset(skb, offset,
2599                                                 skb_put(nskb, hsize), hsize);
2600
2601                while (pos < offset + len && i < nfrags) {
2602                        *frag = skb_shinfo(skb)->frags[i];
2603                        get_page(frag->page);
2604                        size = frag->size;
2605
2606                        if (pos < offset) {
2607                                frag->page_offset += offset - pos;
2608                                frag->size -= offset - pos;
2609                        }
2610
2611                        skb_shinfo(nskb)->nr_frags++;
2612
2613                        if (pos + size <= offset + len) {
2614                                i++;
2615                                pos += size;
2616                        } else {
2617                                frag->size -= pos + size - (offset + len);
2618                                goto skip_fraglist;
2619                        }
2620
2621                        frag++;
2622                }
2623
2624                if (pos < offset + len) {
2625                        struct sk_buff *fskb2 = fskb;
2626
2627                        BUG_ON(pos + fskb->len != offset + len);
2628
2629                        pos += fskb->len;
2630                        fskb = fskb->next;
2631
2632                        if (fskb2->next) {
2633                                fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2634                                if (!fskb2)
2635                                        goto err;
2636                        } else
2637                                skb_get(fskb2);
2638
2639                        BUG_ON(skb_shinfo(nskb)->frag_list);
2640                        skb_shinfo(nskb)->frag_list = fskb2;
2641                }
2642
2643skip_fraglist:
2644                nskb->data_len = len - hsize;
2645                nskb->len += nskb->data_len;
2646                nskb->truesize += nskb->data_len;
2647        } while ((offset += len) < skb->len);
2648
2649        return segs;
2650
2651err:
2652        while ((skb = segs)) {
2653                segs = skb->next;
2654                kfree_skb(skb);
2655        }
2656        return ERR_PTR(err);
2657}
2658EXPORT_SYMBOL_GPL(skb_segment);
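
/*
 * Caller-side sketch: segment a GSO skb and walk the resulting list,
 * the shape of the software GSO fallback on the transmit path. A real
 * caller would hand each segment to the driver instead of freeing it.
 */
static int example_gso_fallback(struct sk_buff *skb, int features)
{
        struct sk_buff *segs, *nskb;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        while (segs) {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                kfree_skb(nskb);        /* stand-in for transmission */
        }
        return 0;
}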
2659
2660int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2661{
2662        struct sk_buff *p = *head;
2663        struct sk_buff *nskb;
2664        unsigned int headroom;
2665        unsigned int len = skb_gro_len(skb);
2666
2667        if (p->len + len >= 65536)
2668                return -E2BIG;
2669
2670        if (skb_shinfo(p)->frag_list)
2671                goto merge;
2672        else if (skb_headlen(skb) <= skb_gro_offset(skb)) {
2673                if (skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags >
2674                    MAX_SKB_FRAGS)
2675                        return -E2BIG;
2676
2677                skb_shinfo(skb)->frags[0].page_offset +=
2678                        skb_gro_offset(skb) - skb_headlen(skb);
2679                skb_shinfo(skb)->frags[0].size -=
2680                        skb_gro_offset(skb) - skb_headlen(skb);
2681
2682                memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
2683                       skb_shinfo(skb)->frags,
2684                       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
2685
2686                skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
2687                skb_shinfo(skb)->nr_frags = 0;
2688
2689                skb->truesize -= skb->data_len;
2690                skb->len -= skb->data_len;
2691                skb->data_len = 0;
2692
2693                NAPI_GRO_CB(skb)->free = 1;
2694                goto done;
2695        }
2696
2697        headroom = skb_headroom(p);
2698        nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
2699        if (unlikely(!nskb))
2700                return -ENOMEM;
2701
2702        __copy_skb_header(nskb, p);
2703        nskb->mac_len = p->mac_len;
2704
2705        skb_reserve(nskb, headroom);
2706        __skb_put(nskb, skb_gro_offset(p));
2707
2708        skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2709        skb_set_network_header(nskb, skb_network_offset(p));
2710        skb_set_transport_header(nskb, skb_transport_offset(p));
2711
2712        __skb_pull(p, skb_gro_offset(p));
2713        memcpy(skb_mac_header(nskb), skb_mac_header(p),
2714               p->data - skb_mac_header(p));
2715
2716        *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2717        skb_shinfo(nskb)->frag_list = p;
2718        skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
2719        skb_header_release(p);
2720        nskb->prev = p;
2721
2722        nskb->data_len += p->len;
2723        nskb->truesize += p->len;
2724        nskb->len += p->len;
2725
2726        *head = nskb;
2727        nskb->next = p->next;
2728        p->next = NULL;
2729
2730        p = nskb;
2731
2732merge:
2733        if (skb_gro_offset(skb) > skb_headlen(skb)) {
2734                skb_shinfo(skb)->frags[0].page_offset +=
2735                        skb_gro_offset(skb) - skb_headlen(skb);
2736                skb_shinfo(skb)->frags[0].size -=
2737                        skb_gro_offset(skb) - skb_headlen(skb);
2738                skb_gro_reset_offset(skb);
2739                skb_gro_pull(skb, skb_headlen(skb));
2740        }
2741
2742        __skb_pull(skb, skb_gro_offset(skb));
2743
2744        p->prev->next = skb;
2745        p->prev = skb;
2746        skb_header_release(skb);
2747
2748done:
2749        NAPI_GRO_CB(p)->count++;
2750        p->data_len += len;
2751        p->truesize += len;
2752        p->len += len;
2753
2754        NAPI_GRO_CB(skb)->same_flow = 1;
2755        return 0;
2756}
2757EXPORT_SYMBOL_GPL(skb_gro_receive);
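
/*
 * Sketch of a gro_receive callback's final step (the flow comparison
 * itself is elided): once two packets are known to belong to the same
 * flow, skb_gro_receive() merges @skb into the chain at @head and
 * marks it same_flow.
 */
static int example_gro_try_merge(struct sk_buff **head, struct sk_buff *skb)
{
        return skb_gro_receive(head, skb);      /* nonzero: merge failed */
}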
2758
2759void __init skb_init(void)
2760{
2761        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2762                                              sizeof(struct sk_buff),
2763                                              0,
2764                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2765                                              NULL);
2766        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2767                                                (2*sizeof(struct sk_buff)) +
2768                                                sizeof(atomic_t),
2769                                                0,
2770                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2771                                                NULL);
2772}
2773
2774/**
2775 *      skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2776 *      @skb: Socket buffer containing the buffers to be mapped
2777 *      @sg: The scatter-gather list to map into
2778 *      @offset: The offset into the buffer's contents to start mapping
2779 *      @len: Length of buffer space to be mapped
2780 *
2781 *      Fill the specified scatter-gather list with mappings/pointers into a
2782 *      region of the buffer space attached to a socket buffer.
2783 */
2784static int
2785__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2786{
2787        int start = skb_headlen(skb);
2788        int i, copy = start - offset;
2789        int elt = 0;
2790
2791        if (copy > 0) {
2792                if (copy > len)
2793                        copy = len;
2794                sg_set_buf(sg, skb->data + offset, copy);
2795                elt++;
2796                if ((len -= copy) == 0)
2797                        return elt;
2798                offset += copy;
2799        }
2800
2801        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2802                int end;
2803
2804                WARN_ON(start > offset + len);
2805
2806                end = start + skb_shinfo(skb)->frags[i].size;
2807                if ((copy = end - offset) > 0) {
2808                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2809
2810                        if (copy > len)
2811                                copy = len;
2812                        sg_set_page(&sg[elt], frag->page, copy,
2813                                        frag->page_offset+offset-start);
2814                        elt++;
2815                        if (!(len -= copy))
2816                                return elt;
2817                        offset += copy;
2818                }
2819                start = end;
2820        }
2821
2822        if (skb_shinfo(skb)->frag_list) {
2823                struct sk_buff *list = skb_shinfo(skb)->frag_list;
2824
2825                for (; list; list = list->next) {
2826                        int end;
2827
2828                        WARN_ON(start > offset + len);
2829
2830                        end = start + list->len;
2831                        if ((copy = end - offset) > 0) {
2832                                if (copy > len)
2833                                        copy = len;
2834                                elt += __skb_to_sgvec(list, sg+elt, offset - start,
2835                                                      copy);
2836                                if ((len -= copy) == 0)
2837                                        return elt;
2838                                offset += copy;
2839                        }
2840                        start = end;
2841                }
2842        }
2843        BUG_ON(len);
2844        return elt;
2845}
2846
2847int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2848{
2849        int nsg = __skb_to_sgvec(skb, sg, offset, len);
2850
2851        sg_mark_end(&sg[nsg - 1]);
2852
2853        return nsg;
2854}
2855EXPORT_SYMBOL_GPL(skb_to_sgvec);
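
/*
 * Sketch (assumes the skb has no frag_list, so head + frags bound the
 * entry count): map a whole skb into an on-stack scatterlist, the way
 * crypto users feed packet data to a transform.
 */
static int example_map_skb(struct sk_buff *skb)
{
        struct scatterlist sg[MAX_SKB_FRAGS + 1];
        int nsg;

        sg_init_table(sg, MAX_SKB_FRAGS + 1);
        nsg = skb_to_sgvec(skb, sg, 0, skb->len);
        return nsg;     /* entries used; the last one is already marked */
}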
2856
2857/**
2858 *      skb_cow_data - Check that a socket buffer's data buffers are writable
2859 *      @skb: The socket buffer to check.
2860 *      @tailbits: Amount of trailing space to be added
2861 *      @trailer: Returned pointer to the skb where the @tailbits space begins
2862 *
2863 *      Make sure that the data buffers attached to a socket buffer are
2864 *      writable. If they are not, private copies are made of the data buffers
2865 *      and the socket buffer is set to use these instead.
2866 *
2867 *      If @tailbits is given, make sure that there is space to write @tailbits
2868 *      bytes of data beyond current end of socket buffer.  @trailer will be
2869 *      set to point to the skb in which this space begins.
2870 *
2871 *      The number of scatterlist elements required to completely map the
2872 *      COW'd and extended socket buffer will be returned.
2873 */
2874int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2875{
2876        int copyflag;
2877        int elt;
2878        struct sk_buff *skb1, **skb_p;
2879
2880        /* If skb is cloned or its head is paged, reallocate
2881         * head pulling out all the pages (pages are considered not writable
2882         * at the moment even if they are anonymous).
2883         */
2884        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2885            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2886                return -ENOMEM;
2887
2888        /* Easy case. Most of packets will go this way. */
2889        if (!skb_shinfo(skb)->frag_list) {
2890                /* A little trouble: not enough space for the trailer.
2891                 * This should not happen when the stack is tuned to
2892                 * generate good frames. On a miss we reallocate and
2893                 * reserve even more space; 128 bytes is fair. */
2894
2895                if (skb_tailroom(skb) < tailbits &&
2896                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2897                        return -ENOMEM;
2898
2899                /* Voila! */
2900                *trailer = skb;
2901                return 1;
2902        }
2903
2904        /* Misery. We are in trouble, going to mincer fragments... */
2905
2906        elt = 1;
2907        skb_p = &skb_shinfo(skb)->frag_list;
2908        copyflag = 0;
2909
2910        while ((skb1 = *skb_p) != NULL) {
2911                int ntail = 0;
2912
2913                /* The fragment may have been partially pulled by someone;
2914                 * this can happen on input. Copy it and everything
2915                 * after it. */
2916
2917                if (skb_shared(skb1))
2918                        copyflag = 1;
2919
2920                /* If the skb is the last, worry about trailer. */
2921
2922                if (skb1->next == NULL && tailbits) {
2923                        if (skb_shinfo(skb1)->nr_frags ||
2924                            skb_shinfo(skb1)->frag_list ||
2925                            skb_tailroom(skb1) < tailbits)
2926                                ntail = tailbits + 128;
2927                }
2928
2929                if (copyflag ||
2930                    skb_cloned(skb1) ||
2931                    ntail ||
2932                    skb_shinfo(skb1)->nr_frags ||
2933                    skb_shinfo(skb1)->frag_list) {
2934                        struct sk_buff *skb2;
2935
2936                        /* Ugh, we are miserable poor guys... */
2937                        if (ntail == 0)
2938                                skb2 = skb_copy(skb1, GFP_ATOMIC);
2939                        else
2940                                skb2 = skb_copy_expand(skb1,
2941                                                       skb_headroom(skb1),
2942                                                       ntail,
2943                                                       GFP_ATOMIC);
2944                        if (unlikely(skb2 == NULL))
2945                                return -ENOMEM;
2946
2947                        if (skb1->sk)
2948                                skb_set_owner_w(skb2, skb1->sk);
2949
2950                        /* Looking around. Are we still alive?
2951                         * OK, link new skb, drop old one */
2952
2953                        skb2->next = skb1->next;
2954                        *skb_p = skb2;
2955                        kfree_skb(skb1);
2956                        skb1 = skb2;
2957                }
2958                elt++;
2959                *trailer = skb1;
2960                skb_p = &skb1->next;
2961        }
2962
2963        return elt;
2964}
2965EXPORT_SYMBOL_GPL(skb_cow_data);
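
/*
 * IPsec-style sketch: COW the buffer, let skb_cow_data() report how
 * many scatterlist entries the result needs, then map it. The crypto
 * call itself is elided.
 */
static int example_cow_and_map(struct sk_buff *skb)
{
        struct sk_buff *trailer;
        struct scatterlist *sg;
        int nsg;

        nsg = skb_cow_data(skb, 0, &trailer);
        if (nsg < 0)
                return nsg;

        sg = kmalloc(nsg * sizeof(*sg), GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;

        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, skb->len);
        /* ... hand sg to the transform here ... */
        kfree(sg);
        return 0;
}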
2966
2967void skb_tstamp_tx(struct sk_buff *orig_skb,
2968                struct skb_shared_hwtstamps *hwtstamps)
2969{
2970        struct sock *sk = orig_skb->sk;
2971        struct sock_exterr_skb *serr;
2972        struct sk_buff *skb;
2973        int err;
2974
2975        if (!sk)
2976                return;
2977
2978        skb = skb_clone(orig_skb, GFP_ATOMIC);
2979        if (!skb)
2980                return;
2981
2982        if (hwtstamps) {
2983                *skb_hwtstamps(skb) =
2984                        *hwtstamps;
2985        } else {
2986                /*
2987                 * no hardware timestamps are available,
2988                 * so keep the skb_shared_tx info and
2989                 * store only a software timestamp
2990                 */
2991                skb->tstamp = ktime_get_real();
2992        }
2993
2994        serr = SKB_EXT_ERR(skb);
2995        memset(serr, 0, sizeof(*serr));
2996        serr->ee.ee_errno = ENOMSG;
2997        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
2998        err = sock_queue_err_skb(sk, skb);
2999        if (err)
3000                kfree_skb(skb);
3001}
3002EXPORT_SYMBOL_GPL(skb_tstamp_tx);
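
/*
 * Driver-side sketch (the raw nanosecond stamp is hypothetical):
 * report a hardware transmit timestamp back to the socket's error
 * queue once the NIC has stamped the frame.
 */
static void example_tx_complete(struct sk_buff *skb, u64 hw_ns)
{
        struct skb_shared_hwtstamps hwts;

        memset(&hwts, 0, sizeof(hwts));
        hwts.hwtstamp = ns_to_ktime(hw_ns);
        skb_tstamp_tx(skb, &hwts);      /* clones skb onto the error queue */
}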
3003
3004
3005/**
3006 * skb_partial_csum_set - set up and verify partial csum values for packet
3007 * @skb: the skb to set
3008 * @start: the number of bytes after skb->data to start checksumming.
3009 * @off: the offset from start to place the checksum.
3010 *
3011 * For untrusted partially-checksummed packets, we need to make sure the values
3012 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3013 *
3014 * This function checks and sets those values and skb->ip_summed: if this
3015 * returns false you should drop the packet.
3016 */
3017bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3018{
3019        if (unlikely(start > skb->len - 2) ||
3020            unlikely((int)start + off > skb->len - 2)) {
3021                if (net_ratelimit())
3022                        printk(KERN_WARNING
3023                               "bad partial csum: csum=%u/%u len=%u\n",
3024                               start, off, skb->len);
3025                return false;
3026        }
3027        skb->ip_summed = CHECKSUM_PARTIAL;
3028        skb->csum_start = skb_headroom(skb) + start;
3029        skb->csum_offset = off;
3030        return true;
3031}
3032EXPORT_SYMBOL_GPL(skb_partial_csum_set);
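
/*
 * Sketch (virtio-net style): validate checksum offsets supplied by an
 * untrusted source before accepting the packet.
 */
static int example_accept_csum(struct sk_buff *skb, u16 start, u16 off)
{
        if (!skb_partial_csum_set(skb, start, off))
                return -EINVAL; /* caller should drop the packet */
        return 0;
}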
3033
3034void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3035{
3036        if (net_ratelimit())
3037                pr_warning("%s: received packets cannot be forwarded"
3038                           " while LRO is enabled\n", skb->dev->name);
3039}
3040EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3041