linux/include/net/sock.h
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the AF_INET socket handler.
 *
 * Version:     @(#)sock.h      1.0.4   05/13/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *              Alan Cox        :       Volatiles in skbuff pointers. See
 *                                      skbuff comments. May be overdone,
 *                                      better to prove they can be removed
 *                                      than the reverse.
 *              Alan Cox        :       Added a zapped field for tcp to note
 *                                      a socket is reset and must stay shut up
 *              Alan Cox        :       New fields for options
 *      Pauline Middelink       :       identd support
 *              Alan Cox        :       Eliminate low level recv/recvfrom
 *              David S. Miller :       New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :       removed net_pinfo, tp_pinfo and made
 *                                      protinfo be just a void pointer, as the
 *                                      protocol specific parts were moved to
 *                                      respective headers and ipv4/v6, etc now
 *                                      use private slabcaches for their socks
 *              Pedro Hortas    :       New flags field for socket options
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>       /* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/aio.h>
#include <linux/sched.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <linux/net_tstamp.h>

struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
#else
static inline
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
        return 0;
}
static inline
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
}
#endif
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
                                        printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif

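/*
 * Illustrative example, not part of the original header: how a protocol
 * might use SOCK_DEBUG(). A minimal sketch; foo_do_rcv() is hypothetical.
 * The message is only emitted when SO_DEBUG is set on the socket (the
 * SOCK_DBG flag); otherwise the call costs nothing.
 */
#if 0 /* illustration only */
static int foo_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        SOCK_DEBUG(sk, "foo: rcv skb len=%u state=%d\n",
                   skb->len, sk->sk_state);
        return 0;
}
#endif
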
/* This is the per-socket lock.  The spinlock provides synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
        spinlock_t              slock;
        int                     owned;
        wait_queue_head_t       wq;
        /*
         * We express the mutex-alike socket_lock semantics
         * to the lock validator by explicitly managing
         * the slock as a lock variant (in addition to
         * the slock itself):
         */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;
/**
 *      struct sock_common - minimal network layer representation of sockets
 *      @skc_daddr: Foreign IPv4 addr
 *      @skc_rcv_saddr: Bound local IPv4 addr
 *      @skc_hash: hash value used with various protocol lookup tables
 *      @skc_u16hashes: two u16 hash values used by UDP lookup tables
 *      @skc_dport: placeholder for inet_dport/tw_dport
 *      @skc_num: placeholder for inet_num/tw_num
 *      @skc_family: network address family
 *      @skc_state: Connection state
 *      @skc_reuse: %SO_REUSEADDR setting
 *      @skc_reuseport: %SO_REUSEPORT setting
 *      @skc_bound_dev_if: bound device index if != 0
 *      @skc_bind_node: bind hash linkage for various protocol lookup tables
 *      @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *      @skc_prot: protocol handlers inside a network family
 *      @skc_net: reference to the network namespace of this socket
 *      @skc_node: main hash linkage for various protocol lookup tables
 *      @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *      @skc_tx_queue_mapping: tx queue number for this connection
 *      @skc_refcnt: reference count
 *
 *      This is the minimal network layer representation of sockets, the header
 *      for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
        /* skc_daddr and skc_rcv_saddr must be grouped on an 8-byte aligned
         * address on 64bit arches : cf INET_MATCH()
         */
        union {
                __addrpair      skc_addrpair;
                struct {
                        __be32  skc_daddr;
                        __be32  skc_rcv_saddr;
                };
        };
        union {
                unsigned int    skc_hash;
                __u16           skc_u16hashes[2];
        };
        /* skc_dport && skc_num must be grouped as well */
        union {
                __portpair      skc_portpair;
                struct {
                        __be16  skc_dport;
                        __u16   skc_num;
                };
        };

        unsigned short          skc_family;
        volatile unsigned char  skc_state;
        unsigned char           skc_reuse:4;
        unsigned char           skc_reuseport:1;
        unsigned char           skc_ipv6only:1;
        int                     skc_bound_dev_if;
        union {
                struct hlist_node       skc_bind_node;
                struct hlist_nulls_node skc_portaddr_node;
        };
        struct proto            *skc_prot;
#ifdef CONFIG_NET_NS
        struct net              *skc_net;
#endif

#if IS_ENABLED(CONFIG_IPV6)
        struct in6_addr         skc_v6_daddr;
        struct in6_addr         skc_v6_rcv_saddr;
#endif

        /*
         * fields between dontcopy_begin/dontcopy_end
         * are not copied in sock_copy()
         */
        /* private: */
        int                     skc_dontcopy_begin[0];
        /* public: */
        union {
                struct hlist_node       skc_node;
                struct hlist_nulls_node skc_nulls_node;
        };
        int                     skc_tx_queue_mapping;
        atomic_t                skc_refcnt;
        /* private: */
        int                     skc_dontcopy_end[0];
        /* public: */
};
struct cg_proto;
/**
  *     struct sock - network layer representation of sockets
  *     @__sk_common: shared layout with inet_timewait_sock
  *     @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *     @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *     @sk_lock:       synchronizer
  *     @sk_rcvbuf: size of receive buffer in bytes
  *     @sk_wq: sock wait queue and async head
  *     @sk_rx_dst: receive input route used by early demux
  *     @sk_dst_cache: destination cache
  *     @sk_dst_lock: destination cache lock
  *     @sk_policy: flow policy
  *     @sk_receive_queue: incoming packets
  *     @sk_wmem_alloc: transmit queue bytes committed
  *     @sk_write_queue: Packet sending queue
  *     @sk_omem_alloc: "o" is "option" or "other"
  *     @sk_wmem_queued: persistent queue size
  *     @sk_forward_alloc: space allocated forward
  *     @sk_napi_id: id of the last napi context to receive data for sk
  *     @sk_ll_usec: usecs to busypoll when there is no data
  *     @sk_allocation: allocation mode
  *     @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
  *     @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
  *     @sk_sndbuf: size of send buffer in bytes
  *     @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *                %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  *     @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
  *     @sk_no_check_rx: allow zero checksum in RX packets
  *     @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *     @sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
  *     @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *     @sk_gso_max_size: Maximum GSO segment size to build
  *     @sk_gso_max_segs: Maximum number of GSO segments
  *     @sk_lingertime: %SO_LINGER l_linger setting
  *     @sk_backlog: always used with the per-socket spinlock held
  *     @sk_callback_lock: used with the callbacks in the end of this struct
  *     @sk_error_queue: rarely used
  *     @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *                       IPV6_ADDRFORM for instance)
  *     @sk_err: last error
  *     @sk_err_soft: errors that don't cause failure but are the cause of a
  *                   persistent failure not just 'timed out'
  *     @sk_drops: raw/udp drops counter
  *     @sk_ack_backlog: current listen backlog
  *     @sk_max_ack_backlog: listen backlog set in listen()
  *     @sk_priority: %SO_PRIORITY setting
  *     @sk_cgrp_prioidx: socket group's priority map index
  *     @sk_type: socket type (%SOCK_STREAM, etc)
  *     @sk_protocol: which protocol this socket belongs in this network family
  *     @sk_peer_pid: &struct pid for this socket's peer
  *     @sk_peer_cred: %SO_PEERCRED setting
  *     @sk_rcvlowat: %SO_RCVLOWAT setting
  *     @sk_rcvtimeo: %SO_RCVTIMEO setting
  *     @sk_sndtimeo: %SO_SNDTIMEO setting
  *     @sk_rxhash: flow hash received from netif layer
  *     @sk_incoming_cpu: record cpu processing incoming packets
  *     @sk_txhash: computed flow hash for use on transmit
  *     @sk_filter: socket filtering instructions
  *     @sk_protinfo: private area, net family specific, when not using slab
  *     @sk_timer: sock cleanup timer
  *     @sk_stamp: time stamp of last packet received
  *     @sk_tsflags: %SO_TIMESTAMPING socket options
  *     @sk_tskey: counter to disambiguate concurrent tstamp requests
  *     @sk_socket: Identd and reporting IO signals
  *     @sk_user_data: RPC layer private data
  *     @sk_frag: cached page frag
  *     @sk_peek_off: current peek_offset value
  *     @sk_send_head: front of stuff to transmit
  *     @sk_security: used by security modules
  *     @sk_mark: generic packet mark
  *     @sk_classid: this socket's cgroup classid
  *     @sk_cgrp: this socket's cgroup-specific proto data
  *     @sk_write_pending: a write to stream socket waits to start
  *     @sk_state_change: callback to indicate change in the state of the sock
  *     @sk_data_ready: callback to indicate there is data to be processed
  *     @sk_write_space: callback to indicate there is sending buffer space available
  *     @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *     @sk_backlog_rcv: callback to process the backlog
  *     @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
        /*
         * Now struct inet_timewait_sock also uses sock_common, so please just
         * don't add anything before this first member (__sk_common) --acme
         */
        struct sock_common      __sk_common;
#define sk_node                 __sk_common.skc_node
#define sk_nulls_node           __sk_common.skc_nulls_node
#define sk_refcnt               __sk_common.skc_refcnt
#define sk_tx_queue_mapping     __sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin       __sk_common.skc_dontcopy_begin
#define sk_dontcopy_end         __sk_common.skc_dontcopy_end
#define sk_hash                 __sk_common.skc_hash
#define sk_portpair             __sk_common.skc_portpair
#define sk_num                  __sk_common.skc_num
#define sk_dport                __sk_common.skc_dport
#define sk_addrpair             __sk_common.skc_addrpair
#define sk_daddr                __sk_common.skc_daddr
#define sk_rcv_saddr            __sk_common.skc_rcv_saddr
#define sk_family               __sk_common.skc_family
#define sk_state                __sk_common.skc_state
#define sk_reuse                __sk_common.skc_reuse
#define sk_reuseport            __sk_common.skc_reuseport
#define sk_ipv6only             __sk_common.skc_ipv6only
#define sk_bound_dev_if         __sk_common.skc_bound_dev_if
#define sk_bind_node            __sk_common.skc_bind_node
#define sk_prot                 __sk_common.skc_prot
#define sk_net                  __sk_common.skc_net
#define sk_v6_daddr             __sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr         __sk_common.skc_v6_rcv_saddr

        socket_lock_t           sk_lock;
        struct sk_buff_head     sk_receive_queue;
        /*
         * The backlog queue is special, it is always used with
         * the per-socket spinlock held and requires low latency
         * access. Therefore we special case its implementation.
         * Note : rmem_alloc is in this structure to fill a hole
         * on 64bit arches, not because it's logically part of
         * backlog.
         */
        struct {
                atomic_t        rmem_alloc;
                int             len;
                struct sk_buff  *head;
                struct sk_buff  *tail;
        } sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
        int                     sk_forward_alloc;
#ifdef CONFIG_RPS
        __u32                   sk_rxhash;
#endif
        u16                     sk_incoming_cpu;
        /* 16bit hole
         * Warning: sk_incoming_cpu can be set from softirq;
         * do not use this hole without fully understanding the possible issues.
         */

        __u32                   sk_txhash;
#ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int            sk_napi_id;
        unsigned int            sk_ll_usec;
#endif
        atomic_t                sk_drops;
        int                     sk_rcvbuf;

        struct sk_filter __rcu  *sk_filter;
        struct socket_wq __rcu  *sk_wq;

#ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
#endif
        unsigned long           sk_flags;
        struct dst_entry        *sk_rx_dst;
        struct dst_entry __rcu  *sk_dst_cache;
        spinlock_t              sk_dst_lock;
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_sndbuf;
        struct sk_buff_head     sk_write_queue;
        kmemcheck_bitfield_begin(flags);
        unsigned int            sk_shutdown  : 2,
                                sk_no_check_tx : 1,
                                sk_no_check_rx : 1,
                                sk_userlocks : 4,
                                sk_protocol  : 8,
                                sk_type      : 16;
        kmemcheck_bitfield_end(flags);
        int                     sk_wmem_queued;
        gfp_t                   sk_allocation;
        u32                     sk_pacing_rate; /* bytes per second */
        u32                     sk_max_pacing_rate;
        netdev_features_t       sk_route_caps;
        netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
        u16                     sk_gso_max_segs;
        int                     sk_rcvlowat;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
        __u32                   sk_cgrp_prioidx;
#endif
        struct pid              *sk_peer_pid;
        const struct cred       *sk_peer_cred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
        u16                     sk_tsflags;
        u32                     sk_tskey;
        struct socket           *sk_socket;
        void                    *sk_user_data;
        struct page_frag        sk_frag;
        struct sk_buff          *sk_send_head;
        __s32                   sk_peek_off;
        int                     sk_write_pending;
#ifdef CONFIG_SECURITY
        void                    *sk_security;
#endif
        __u32                   sk_mark;
        u32                     sk_classid;
        struct cg_proto         *sk_cgrp;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk);
        void                    (*sk_write_space)(struct sock *sk);
        void                    (*sk_error_report)(struct sock *sk);
        int                     (*sk_backlog_rcv)(struct sock *sk,
                                                  struct sk_buff *skb);
        void                    (*sk_destruct)(struct sock *sk);
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)        rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)        rcu_assign_pointer(__sk_user_data((sk)), ptr)

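/*
 * Illustrative example, not part of the original header: attaching private
 * state to a socket through the RCU-protected sk_user_data pointer. A
 * minimal sketch; struct foo_state, foo_attach() and foo_lookup() are
 * hypothetical.
 */
#if 0 /* illustration only */
static void foo_attach(struct sock *sk, struct foo_state *st)
{
        rcu_assign_sk_user_data(sk, st);        /* publish the pointer */
}

static struct foo_state *foo_lookup(struct sock *sk)
{
        /* caller must be inside an rcu_read_lock() section */
        return rcu_dereference_sk_user_data(sk);
}
#endif
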
/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not it is
 * OK for its port to be reused by someone else. SK_FORCE_REUSE on a socket
 * means that the socket will reuse everybody else's port without looking
 * at the other socket's sk_reuse value.
 */

#define SK_NO_REUSE     0
#define SK_CAN_REUSE    1
#define SK_FORCE_REUSE  2

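/*
 * Illustrative example, not part of the original header: kernel-internal
 * sockets sometimes mark themselves reusable so they do not block later
 * binds to the same port. A minimal sketch; foo_mark_reusable() is
 * hypothetical.
 */
#if 0 /* illustration only */
static void foo_mark_reusable(struct sock *sk)
{
        sk->sk_reuse = SK_CAN_REUSE;    /* let others bind this port */
}
#endif
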
static inline int sk_peek_offset(struct sock *sk, int flags)
{
        if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
                return sk->sk_peek_off;
        else
                return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
        if (sk->sk_peek_off >= 0) {
                if (sk->sk_peek_off >= val)
                        sk->sk_peek_off -= val;
                else
                        sk->sk_peek_off = 0;
        }
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
        if (sk->sk_peek_off >= 0)
                sk->sk_peek_off += val;
}

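/*
 * Illustrative example, not part of the original header: how a datagram
 * recvmsg path can honour SO_PEEK_OFF with the helpers above. A minimal
 * sketch; foo_recvmsg_peek() and its queue handling are hypothetical.
 */
#if 0 /* illustration only */
static int foo_recvmsg_peek(struct sock *sk, int flags, int len)
{
        int off = sk_peek_offset(sk, flags); /* 0 unless MSG_PEEK + SO_PEEK_OFF */

        /* ... skip 'off' bytes of queued data, copy up to 'len' bytes ... */

        if (flags & MSG_PEEK)
                sk_peek_offset_fwd(sk, len);    /* next peek starts further in */
        else
                sk_peek_offset_bwd(sk, len);    /* data consumed, rewind */
        return len;
}
#endif
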
/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
        return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
        return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
        return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
        return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
        return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
        return sk->sk_node.next ?
                hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
        return (!is_a_nulls(sk->sk_nulls_node.next)) ?
                hlist_nulls_entry(sk->sk_nulls_node.next,
                                  struct sock, sk_nulls_node) :
                NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
        return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
        return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
        node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
        node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
        __hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
        if (sk_hashed(sk)) {
                __sk_del_node(sk);
                sk_node_init(&sk->sk_node);
                return true;
        }
        return false;
}

/* Grab a socket reference count. This operation is valid only
   when sk is ALREADY referenced, e.g. it has been found in a hash table
   or a list and the lookup was made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
        atomic_inc(&sk->sk_refcnt);
}

/* Ungrab a socket in a context where its refcount is known not to hit
   zero, e.g. this holds in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
        atomic_dec(&sk->sk_refcnt);
}

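/*
 * Illustrative example, not part of the original header: the
 * lookup-then-hold pattern the sock_hold() comment describes. A minimal
 * sketch; foo_get() is hypothetical and the lock is whatever prevents
 * concurrent unhashing of the chain.
 */
#if 0 /* illustration only */
static struct sock *foo_get(struct hlist_head *head, spinlock_t *lock)
{
        struct sock *sk;

        spin_lock(lock);
        sk = sk_head(head);             /* refcount implicitly valid here */
        if (sk)
                sock_hold(sk);          /* take our own reference */
        spin_unlock(lock);
        return sk;                      /* caller must sock_put(sk) later */
}
#endif
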
static inline bool sk_del_node_init(struct sock *sk)
{
        bool rc = __sk_del_node_init(sk);

        if (rc) {
                /* paranoid for a while -acme */
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
        return rc;
}
#define sk_del_node_init_rcu(sk)        sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
        if (sk_hashed(sk)) {
                hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
                return true;
        }
        return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
        bool rc = __sk_nulls_del_node_init_rcu(sk);

        if (rc) {
                /* paranoid for a while -acme */
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
        return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
        hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
        sock_hold(sk);
        __sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
        sock_hold(sk);
        hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
        hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
        sock_hold(sk);
        __sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
        __hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
                                        struct hlist_head *list)
{
        hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
        hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
        hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
        hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
        hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
        hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
        if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
                hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
        hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
        hlist_for_each_entry(__sk, list, sk_bind_node)

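/*
 * Illustrative example, not part of the original header: using the iterator
 * macros for a simple chain lookup. A minimal sketch; matching on sk_num
 * alone is a hypothetical simplification of what real protocols do.
 */
#if 0 /* illustration only */
static struct sock *foo_lookup(struct hlist_head *chain, unsigned short num)
{
        struct sock *sk;

        sk_for_each(sk, chain) {
                if (sk->sk_num == num)
                        return sk;
        }
        return NULL;
}
#endif
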
/**
 * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
 * @tpos:       the type * to use as a loop cursor.
 * @pos:        the &struct hlist_node to use as a loop cursor.
 * @head:       the head for your list.
 * @offset:     offset of hlist_node within the struct.
 *
 */
#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset)                \
        for (pos = (head)->first;                                              \
             (!is_a_nulls(pos)) &&                                             \
                ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
             pos = pos->next)

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
        /* Careful: only use this in a context where these parameters
         * cannot change and are all valid, such as recvmsg from
         * userspace.
         */
        return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
        SOCK_DEAD,
        SOCK_DONE,
        SOCK_URGINLINE,
        SOCK_KEEPOPEN,
        SOCK_LINGER,
        SOCK_DESTROY,
        SOCK_BROADCAST,
        SOCK_TIMESTAMP,
        SOCK_ZAPPED,
        SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
        SOCK_DBG, /* %SO_DEBUG setting */
        SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
        SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
        SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
        SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
        SOCK_MEMALLOC, /* VM depends on this socket for swapping */
        SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
        SOCK_FASYNC, /* fasync() active */
        SOCK_RXQ_OVFL,
        SOCK_ZEROCOPY, /* buffers from userspace */
        SOCK_WIFI_STATUS, /* push wifi status to userspace */
        SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
                     * Will use last 4 bytes of packet sent from
                     * user-space instead.
                     */
        SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
        SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
        nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
        __set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
        __clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
        return test_bit(flag, &sk->sk_flags);
}

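/*
 * Illustrative example, not part of the original header: flag tests gate
 * optional per-packet work, e.g. receive timestamping only happens when the
 * user asked for it. A minimal sketch of that pattern; foo_rcv_timestamp()
 * is hypothetical.
 */
#if 0 /* illustration only */
static void foo_rcv_timestamp(struct sock *sk, struct sk_buff *skb)
{
        if (sock_flag(sk, SOCK_RCVTSTAMP))
                __net_timestamp(skb);   /* stamp only when SO_TIMESTAMP is set */
}
#endif
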
#ifdef CONFIG_NET
extern struct static_key memalloc_socks;
static inline int sk_memalloc_socks(void)
{
        return static_key_false(&memalloc_socks);
}
#else

static inline int sk_memalloc_socks(void)
{
        return 0;
}

#endif

static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
{
        return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
        sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
        sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
        return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
        return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
        return sk->sk_sndbuf - sk->sk_wmem_queued;
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
        /* the skb's dst must stay refcounted, we are about to leave the rcu lock */
        skb_dst_force(skb);

        if (!sk->sk_backlog.tail)
                sk->sk_backlog.head = skb;
        else
                sk->sk_backlog.tail->next = skb;

        sk->sk_backlog.tail = skb;
        skb->next = NULL;
}

/*
 * Take into account the size of the receive queue and the backlog queue.
 * Do not take this skb's truesize into account, so that even a single
 * big packet can always get through.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

        return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
                                              unsigned int limit)
{
        if (sk_rcvqueues_full(sk, limit))
                return -ENOBUFS;

        __sk_add_backlog(sk, skb);
        sk->sk_backlog.len += skb->truesize;
        return 0;
}

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (sk_memalloc_socks() && skb_pfmemalloc(skb))
                return __sk_backlog_rcv(sk, skb);

        return sk->sk_backlog_rcv(sk, skb);
}

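/*
 * Illustrative example, not part of the original header: the canonical
 * softirq receive pattern around the backlog, modelled on TCP's packet
 * input path. A minimal sketch; foo_rcv() is hypothetical and the limit
 * choice is protocol specific.
 */
#if 0 /* illustration only */
static int foo_rcv(struct sock *sk, struct sk_buff *skb)
{
        int ret = 0;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                ret = sk_backlog_rcv(sk, skb);          /* process directly */
        } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                kfree_skb(skb);                         /* queues are full */
                ret = -ENOBUFS;
        }
        bh_unlock_sock(sk);
        return ret;
}
#endif
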
static inline void sk_incoming_cpu_update(struct sock *sk)
{
        sk->sk_incoming_cpu = raw_smp_processor_id();
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
        struct rps_sock_flow_table *sock_flow_table;

        rcu_read_lock();
        sock_flow_table = rcu_dereference(rps_sock_flow_table);
        rps_record_sock_flow(sock_flow_table, hash);
        rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
        struct rps_sock_flow_table *sock_flow_table;

        rcu_read_lock();
        sock_flow_table = rcu_dereference(rps_sock_flow_table);
        rps_reset_sock_flow(sock_flow_table, hash);
        rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
        sock_rps_record_flow_hash(sk->sk_rxhash);
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
        sock_rps_reset_flow_hash(sk->sk_rxhash);
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
                                        const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
        if (unlikely(sk->sk_rxhash != skb->hash)) {
                sock_rps_reset_flow(sk);
                sk->sk_rxhash = skb->hash;
        }
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
        sock_rps_reset_flow(sk);
        sk->sk_rxhash = 0;
#endif
}

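/*
 * Illustrative example, not part of the original header: where the RPS
 * hooks typically sit. A protocol records the flow when an application
 * reads from the socket, and saves the rxhash when packets arrive. A
 * minimal sketch of both call sites; the foo_* names are hypothetical.
 */
#if 0 /* illustration only */
static void foo_recvmsg_enter(struct sock *sk)
{
        sock_rps_record_flow(sk);       /* steer this flow toward our CPU */
}

static void foo_pkt_in(struct sock *sk, const struct sk_buff *skb)
{
        sock_rps_save_rxhash(sk, skb);  /* remember the flow hash */
}
#endif
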
#define sk_wait_event(__sk, __timeo, __condition)                       \
        ({      int __rc;                                               \
                release_sock(__sk);                                     \
                __rc = __condition;                                     \
                if (!__rc) {                                            \
                        *(__timeo) = schedule_timeout(*(__timeo));      \
                }                                                       \
                sched_annotate_sleep();                                 \
                lock_sock(__sk);                                        \
                __rc = __condition;                                     \
                __rc;                                                   \
        })

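/*
 * Illustrative example, not part of the original header: typical
 * sk_wait_event() usage, modelled on sk_wait_data(). The socket lock must
 * be held on entry; the macro drops it while sleeping and re-takes it
 * before re-testing the condition. foo_wait_for_data() is hypothetical.
 */
#if 0 /* illustration only */
static int foo_wait_for_data(struct sock *sk, long *timeo)
{
        DEFINE_WAIT(wait);
        int rc;

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        rc = sk_wait_event(sk, timeo,
                           !skb_queue_empty(&sk->sk_receive_queue));
        finish_wait(sk_sleep(sk), &wait);
        return rc;
}
#endif
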
int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/*
 * Caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
 * nodes unmodified. Special care is taken when initializing the object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{
        if (offsetof(struct sock, sk_node.next) != 0)
                memset(sk, 0, offsetof(struct sock, sk_node.next));
        memset(&sk->sk_node.pprev, 0,
               size - offsetof(struct sock, sk_node.pprev));
}

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
        void                    (*close)(struct sock *sk,
                                        long timeout);
        int                     (*connect)(struct sock *sk,
                                        struct sockaddr *uaddr,
                                        int addr_len);
        int                     (*disconnect)(struct sock *sk, int flags);

        struct sock *           (*accept)(struct sock *sk, int flags, int *err);

        int                     (*ioctl)(struct sock *sk, int cmd,
                                         unsigned long arg);
        int                     (*init)(struct sock *sk);
        void                    (*destroy)(struct sock *sk);
        void                    (*shutdown)(struct sock *sk, int how);
        int                     (*setsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        unsigned int optlen);
        int                     (*getsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        int __user *option);
#ifdef CONFIG_COMPAT
        int                     (*compat_setsockopt)(struct sock *sk,
                                        int level,
                                        int optname, char __user *optval,
                                        unsigned int optlen);
        int                     (*compat_getsockopt)(struct sock *sk,
                                        int level,
                                        int optname, char __user *optval,
                                        int __user *option);
        int                     (*compat_ioctl)(struct sock *sk,
                                        unsigned int cmd, unsigned long arg);
#endif
        int                     (*sendmsg)(struct kiocb *iocb, struct sock *sk,
                                           struct msghdr *msg, size_t len);
        int                     (*recvmsg)(struct kiocb *iocb, struct sock *sk,
                                           struct msghdr *msg,
                                           size_t len, int noblock, int flags,
                                           int *addr_len);
        int                     (*sendpage)(struct sock *sk, struct page *page,
                                        int offset, size_t size, int flags);
        int                     (*bind)(struct sock *sk,
                                        struct sockaddr *uaddr, int addr_len);

        int                     (*backlog_rcv) (struct sock *sk,
                                                struct sk_buff *skb);

        void            (*release_cb)(struct sock *sk);

        /* Keeping track of sk's, looking them up, and port selection methods. */
        void                    (*hash)(struct sock *sk);
        void                    (*unhash)(struct sock *sk);
        void                    (*rehash)(struct sock *sk);
        int                     (*get_port)(struct sock *sk, unsigned short snum);
        void                    (*clear_sk)(struct sock *sk, int size);

        /* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
        unsigned int            inuse_idx;
#endif

        bool                    (*stream_memory_free)(const struct sock *sk);
        /* Memory pressure */
        void                    (*enter_memory_pressure)(struct sock *sk);
        atomic_long_t           *memory_allocated;      /* Current allocated memory. */
        struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
        /*
         * Pressure flag: try to collapse.
         * Technical note: it is used by multiple contexts non atomically.
         * All of __sk_mem_schedule() is of this nature: accounting
         * is strict, actions are advisory and have some latency.
         */
        int                     *memory_pressure;
        long                    *sysctl_mem;
        int                     *sysctl_wmem;
        int                     *sysctl_rmem;
        int                     max_header;
        bool                    no_autobind;

        struct kmem_cache       *slab;
        unsigned int            obj_size;
        int                     slab_flags;

        struct percpu_counter   *orphan_count;

        struct request_sock_ops *rsk_prot;
        struct timewait_sock_ops *twsk_prot;

        union {
                struct inet_hashinfo    *hashinfo;
                struct udp_table        *udp_table;
                struct raw_hashinfo     *raw_hash;
        } h;

        struct module           *owner;

        char                    name[32];

        struct list_head        node;
#ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
#endif
#ifdef CONFIG_MEMCG_KMEM
        /*
         * cgroup specific init/deinit functions. Called once for all
         * protocols that implement it, from the cgroup's populate function.
         * This function has to set up any files the protocol wants to
         * appear in the kmem cgroup filesystem.
         */
        int                     (*init_cgroup)(struct mem_cgroup *memcg,
                                               struct cgroup_subsys *ss);
        void                    (*destroy_cgroup)(struct mem_cgroup *memcg);
        struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};

/*
 * Bits in struct cg_proto.flags
 */
enum cg_proto_flags {
        /* Currently active and new sockets should be assigned to cgroups */
        MEMCG_SOCK_ACTIVE,
        /* It was ever activated; we must disarm static keys on destruction */
        MEMCG_SOCK_ACTIVATED,
};

struct cg_proto {
        struct page_counter     memory_allocated;       /* Current allocated memory. */
        struct percpu_counter   sockets_allocated;      /* Current number of sockets. */
        int                     memory_pressure;
        long                    sysctl_mem[3];
        unsigned long           flags;
        /*
         * The memcg field is used to find which memcg we belong to directly.
         * Each memcg struct can hold more than one cg_proto, so container_of
         * won't really cut it.
         *
         * The elegant solution would be having an inverse function to
         * proto_cgroup in struct proto, but that means polluting the structure
         * for everybody, instead of just for memcg users.
         */
        struct mem_cgroup       *memcg;
};

int proto_register(struct proto *prot, int alloc_slab);
void proto_unregister(struct proto *prot);

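/*
 * Illustrative example, not part of the original header: the skeleton of a
 * struct proto and its registration. A minimal sketch; struct foo_sock and
 * the foo_* handlers are hypothetical, and real protocols fill in many more
 * ops. Passing 1 to proto_register() asks the core to create a slab cache
 * of obj_size bytes for this protocol's sockets.
 */
#if 0 /* illustration only */
struct foo_sock {
        struct sock sk;         /* must be first: the core casts sock pointers */
        /* protocol-private state follows */
};

static struct proto foo_proto = {
        .name     = "FOO",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct foo_sock),
        .close    = foo_close,
        .init     = foo_init_sock,
};

static int __init foo_register(void)
{
        return proto_register(&foo_proto, 1);   /* 1: allocate a slab cache */
}
#endif
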
static inline bool memcg_proto_active(struct cg_proto *cg_proto)
{
        return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}

static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
{
        return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
}

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
        atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
        atomic_dec(&sk->sk_prot->socks);
        printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
               sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
        if (atomic_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
                       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
                                               struct cg_proto *cg_proto)
{
        return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
                                               struct cg_proto *cg_proto)
{
        return NULL;
}
#endif

static inline bool sk_stream_memory_free(const struct sock *sk)
{
        if (sk->sk_wmem_queued >= sk->sk_sndbuf)
                return false;

        return sk->sk_prot->stream_memory_free ?
                sk->sk_prot->stream_memory_free(sk) : true;
}

static inline bool sk_stream_is_writeable(const struct sock *sk)
{
        return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
               sk_stream_memory_free(sk);
}

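/*
 * Worked example (not part of the original header) of the arithmetic behind
 * sk_stream_is_writeable(): with sk_sndbuf = 65536 and sk_wmem_queued =
 * 40960, wspace is 65536 - 40960 = 24576 and min_wspace is 40960 >> 1 =
 * 20480, so the socket still reports writable. It stops doing so once more
 * than two thirds of sndbuf is queued, since sndbuf - q >= q/2 only while
 * q <= 2*sndbuf/3.
 */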

static inline bool sk_has_memory_pressure(const struct sock *sk)
{
        return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
        if (!sk->sk_prot->memory_pressure)
                return false;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                return !!sk->sk_cgrp->memory_pressure;

        return !!*sk->sk_prot->memory_pressure;
}

static inline void sk_leave_memory_pressure(struct sock *sk)
{
        int *memory_pressure = sk->sk_prot->memory_pressure;

        if (!memory_pressure)
                return;

        if (*memory_pressure)
                *memory_pressure = 0;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                struct cg_proto *cg_proto = sk->sk_cgrp;
                struct proto *prot = sk->sk_prot;

                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
                        cg_proto->memory_pressure = 0;
        }
}

static inline void sk_enter_memory_pressure(struct sock *sk)
{
        if (!sk->sk_prot->enter_memory_pressure)
                return;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                struct cg_proto *cg_proto = sk->sk_cgrp;
                struct proto *prot = sk->sk_prot;

                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
                        cg_proto->memory_pressure = 1;
        }

        sk->sk_prot->enter_memory_pressure(sk);
}

static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
        long *prot = sk->sk_prot->sysctl_mem;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                prot = sk->sk_cgrp->sysctl_mem;
        return prot[index];
}

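/*
 * Illustrative example, not part of the original header: sysctl_mem holds
 * three page counts - index 0 is the low watermark, 1 the pressure
 * threshold, 2 the hard limit (cf. tcp_mem). A minimal sketch of a
 * hard-limit check; foo_over_hard_limit() is hypothetical.
 */
#if 0 /* illustration only */
static bool foo_over_hard_limit(const struct sock *sk)
{
        return sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2);
}
#endif
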
static inline void memcg_memory_allocated_add(struct cg_proto *prot,
                                              unsigned long amt,
                                              int *parent_status)
{
        page_counter_charge(&prot->memory_allocated, amt);

        if (page_counter_read(&prot->memory_allocated) >
            prot->memory_allocated.limit)
                *parent_status = OVER_LIMIT;
}

static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
                                              unsigned long amt)
{
        page_counter_uncharge(&prot->memory_allocated, amt);
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
        struct proto *prot = sk->sk_prot;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                return page_counter_read(&sk->sk_cgrp->memory_allocated);

        return atomic_long_read(prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
{
        struct proto *prot = sk->sk_prot;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
                /* update the root cgroup regardless */
                atomic_long_add_return(amt, prot->memory_allocated);
                return page_counter_read(&sk->sk_cgrp->memory_allocated);
        }

        return atomic_long_add_return(amt, prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
        struct proto *prot = sk->sk_prot;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                memcg_memory_allocated_sub(sk->sk_cgrp, amt);

        atomic_long_sub(amt, prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
        struct proto *prot = sk->sk_prot;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                struct cg_proto *cg_proto = sk->sk_cgrp;

                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
                        percpu_counter_dec(&cg_proto->sockets_allocated);
        }

        percpu_counter_dec(prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
        struct proto *prot = sk->sk_prot;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                struct cg_proto *cg_proto = sk->sk_cgrp;

                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
                        percpu_counter_inc(&cg_proto->sockets_allocated);
        }

        percpu_counter_inc(prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
        struct proto *prot = sk->sk_prot;

        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);

        return percpu_counter_read_positive(prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
        return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
        return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
        if (!prot->memory_pressure)
                return false;
        return !!*prot->memory_pressure;
}

#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
                int inc)
{
}
#endif

/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
        sk->sk_prot->unhash(sk);
        sk->sk_prot->hash(sk);
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK       1024

#define SHUTDOWN_MASK   3
#define RCV_SHUTDOWN    1
#define SEND_SHUTDOWN   2

#define SOCK_SNDBUF_LOCK        1
#define SOCK_RCVBUF_LOCK        2
#define SOCK_BINDADDR_LOCK      4
#define SOCK_BINDPORT_LOCK      8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
        struct list_head        list;

        int                     flags;
        int                     size;
        struct socket           *sock;
        struct sock             *sk;
        struct scm_cookie       *scm;
        struct msghdr           *msg, async_msg;
        struct kiocb            *kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
        return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
        return si->kiocb;
}

struct socket_alloc {
        struct socket socket;
        struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
        return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
        return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind);
void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND     0
#define SK_MEM_RECV     1

static inline int sk_mem_pages(int amt)
{
        return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline bool sk_has_account(struct sock *sk)
{
        /* return true if protocol supports memory accounting */
        return !!sk->sk_prot->memory_allocated;
}

static inline bool sk_wmem_schedule(struct sock *sk, int size)
{
        if (!sk_has_account(sk))
                return true;
        return size <= sk->sk_forward_alloc ||
                __sk_mem_schedule(sk, size, SK_MEM_SEND);
}

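/*
 * Illustrative example, not part of the original header: the usual
 * transmit-side accounting sequence, modelled on TCP's queueing code.
 * Schedule first, then charge the skb's truesize; the uncharge happens
 * later via sk_wmem_free_skb(). foo_queue_for_xmit() is hypothetical.
 */
#if 0 /* illustration only */
static int foo_queue_for_xmit(struct sock *sk, struct sk_buff *skb)
{
        if (!sk_wmem_schedule(sk, skb->truesize))
                return -ENOMEM;                 /* over the memory limits */

        skb_queue_tail(&sk->sk_write_queue, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);       /* consume forward_alloc */
        return 0;
}
#endif
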
1445static inline bool
1446sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1447{
1448        if (!sk_has_account(sk))
1449                return true;
1450        return size<= sk->sk_forward_alloc ||
1451                __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
1452                skb_pfmemalloc(skb);
1453}
1454
1455static inline void sk_mem_reclaim(struct sock *sk)
1456{
1457        if (!sk_has_account(sk))
1458                return;
1459        if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1460                __sk_mem_reclaim(sk);
1461}
1462
1463static inline void sk_mem_reclaim_partial(struct sock *sk)
1464{
1465        if (!sk_has_account(sk))
1466                return;
1467        if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1468                __sk_mem_reclaim(sk);
1469}
1470
1471static inline void sk_mem_charge(struct sock *sk, int size)
1472{
1473        if (!sk_has_account(sk))
1474                return;
1475        sk->sk_forward_alloc -= size;
1476}
1477
1478static inline void sk_mem_uncharge(struct sock *sk, int size)
1479{
1480        if (!sk_has_account(sk))
1481                return;
1482        sk->sk_forward_alloc += size;
1483}
1484
1485static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1486{
1487        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1488        sk->sk_wmem_queued -= skb->truesize;
1489        sk_mem_uncharge(sk, skb->truesize);
1490        __kfree_skb(skb);
1491}
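/*
 * Illustrative sketch (not part of the original header) of how a
 * protocol's send path might combine the accounting helpers above;
 * example_queue_for_send() is a hypothetical name.
 */
static inline int example_queue_for_send(struct sock *sk, struct sk_buff *skb)
{
        /* Reserve forward-allocated memory before committing the skb. */
        if (!sk_wmem_schedule(sk, skb->truesize))
                return -ENOBUFS;

        sk_mem_charge(sk, skb->truesize);
        sk->sk_wmem_queued += skb->truesize;
        skb_queue_tail(&sk->sk_write_queue, skb);
        return 0;
}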
1492
1493/* Used by processes to "lock" a socket state, so that
1494 * interrupts and bottom half handlers won't change it
1495 * from under us. It essentially blocks any incoming
1496 * packets, so that we won't get any new data or any
1497 * packets that change the state of the socket.
1498 *
1499 * While locked, BH processing will add new packets to
1500 * the backlog queue.  This queue is processed by the
1501 * owner of the socket lock right before it is released.
1502 *
1503 * Since ~2.3.5 it has also been an exclusive sleep lock serializing
1504 * accesses from user process context.
1505 */
1506#define sock_owned_by_user(sk)  ((sk)->sk_lock.owned)
1507
1508static inline void sock_release_ownership(struct sock *sk)
1509{
1510        sk->sk_lock.owned = 0;
1511}
1512
1513/*
1514 * Macro so as not to evaluate some arguments when
1515 * lockdep is not enabled.
1516 *
1517 * Mark both the sk_lock and the sk_lock.slock as a
1518 * per-address-family lock class.
1519 */
1520#define sock_lock_init_class_and_name(sk, sname, skey, name, key)       \
1521do {                                                                    \
1522        sk->sk_lock.owned = 0;                                          \
1523        init_waitqueue_head(&sk->sk_lock.wq);                           \
1524        spin_lock_init(&(sk)->sk_lock.slock);                           \
1525        debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
1526                        sizeof((sk)->sk_lock));                         \
1527        lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
1528                                (skey), (sname));                               \
1529        lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
1530} while (0)
1531
1532void lock_sock_nested(struct sock *sk, int subclass);
1533
1534static inline void lock_sock(struct sock *sk)
1535{
1536        lock_sock_nested(sk, 0);
1537}
1538
1539void release_sock(struct sock *sk);
1540
1541/* BH context may only use the following locking interface. */
1542#define bh_lock_sock(__sk)      spin_lock(&((__sk)->sk_lock.slock))
1543#define bh_lock_sock_nested(__sk) \
1544                                spin_lock_nested(&((__sk)->sk_lock.slock), \
1545                                SINGLE_DEPTH_NESTING)
1546#define bh_unlock_sock(__sk)    spin_unlock(&((__sk)->sk_lock.slock))
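/*
 * Illustrative sketch (not part of the original header): typical
 * process-context use of the socket lock. While we own the lock,
 * softirq input is diverted to the backlog, which release_sock()
 * processes for us. example_update_sndbuf() is a hypothetical name.
 */
static inline void example_update_sndbuf(struct sock *sk, int val)
{
        lock_sock(sk);                  /* may sleep */
        sk->sk_sndbuf = val;            /* socket state is stable here */
        release_sock(sk);               /* also runs the backlog queue */
}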
1547
1548bool lock_sock_fast(struct sock *sk);
1549/**
1550 * unlock_sock_fast - complement of lock_sock_fast
1551 * @sk: socket
1552 * @slow: slow mode
1553 *
1554 * Fast unlock of a socket, for user context.
1555 * If slow mode is on, we call the regular release_sock().
1556 */
1557static inline void unlock_sock_fast(struct sock *sk, bool slow)
1558{
1559        if (slow)
1560                release_sock(sk);
1561        else
1562                spin_unlock_bh(&sk->sk_lock.slock);
1563}
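/*
 * Illustrative sketch (not part of the original header): the fast-lock
 * pair avoids the full lock_sock() when the socket is not owned by a
 * user process; @slow remembers which path was taken.
 * example_read_err_fast() is a hypothetical name.
 */
static inline int example_read_err_fast(struct sock *sk)
{
        bool slow = lock_sock_fast(sk);
        int err = sk->sk_err;

        unlock_sock_fast(sk, slow);
        return err;
}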
1564
1565
1566struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1567                      struct proto *prot);
1568void sk_free(struct sock *sk);
1569void sk_release_kernel(struct sock *sk);
1570struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1571
1572struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1573                             gfp_t priority);
1574void sock_wfree(struct sk_buff *skb);
1575void skb_orphan_partial(struct sk_buff *skb);
1576void sock_rfree(struct sk_buff *skb);
1577void sock_efree(struct sk_buff *skb);
1578#ifdef CONFIG_INET
1579void sock_edemux(struct sk_buff *skb);
1580#else
1581#define sock_edemux(skb) sock_efree(skb)
1582#endif
1583
1584int sock_setsockopt(struct socket *sock, int level, int op,
1585                    char __user *optval, unsigned int optlen);
1586
1587int sock_getsockopt(struct socket *sock, int level, int op,
1588                    char __user *optval, int __user *optlen);
1589struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1590                                    int noblock, int *errcode);
1591struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1592                                     unsigned long data_len, int noblock,
1593                                     int *errcode, int max_page_order);
1594void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1595void sock_kfree_s(struct sock *sk, void *mem, int size);
1596void sock_kzfree_s(struct sock *sk, void *mem, int size);
1597void sk_send_sigurg(struct sock *sk);
1598
1599/*
1600 * Functions to fill in entries in struct proto_ops when a protocol
1601 * does not implement a particular function.
1602 */
1603int sock_no_bind(struct socket *, struct sockaddr *, int);
1604int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1605int sock_no_socketpair(struct socket *, struct socket *);
1606int sock_no_accept(struct socket *, struct socket *, int);
1607int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
1608unsigned int sock_no_poll(struct file *, struct socket *,
1609                          struct poll_table_struct *);
1610int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1611int sock_no_listen(struct socket *, int);
1612int sock_no_shutdown(struct socket *, int);
1613int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
1614int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
1615int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
1616int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
1617                    int);
1618int sock_no_mmap(struct file *file, struct socket *sock,
1619                 struct vm_area_struct *vma);
1620ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
1621                         size_t size, int flags);
1622
1623/*
1624 * Functions to fill in entries in struct proto_ops when a protocol
1625 * uses the inet style.
1626 */
1627int sock_common_getsockopt(struct socket *sock, int level, int optname,
1628                                  char __user *optval, int __user *optlen);
1629int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1630                               struct msghdr *msg, size_t size, int flags);
1631int sock_common_setsockopt(struct socket *sock, int level, int optname,
1632                                  char __user *optval, unsigned int optlen);
1633int compat_sock_common_getsockopt(struct socket *sock, int level,
1634                int optname, char __user *optval, int __user *optlen);
1635int compat_sock_common_setsockopt(struct socket *sock, int level,
1636                int optname, char __user *optval, unsigned int optlen);
1637
1638void sk_common_release(struct sock *sk);
1639
1640/*
1641 *      Default socket callbacks and setup code
1642 */
1643
1644/* Initialise core socket variables */
1645void sock_init_data(struct socket *sock, struct sock *sk);
1646
1647/*
1648 * Socket reference counting postulates.
1649 *
1650 * * Each user of a socket SHOULD hold a reference count.
1651 * * Each access point to a socket (a hash table bucket, a reference from
1652 *   a list, a running timer, an skb in flight) MUST hold a reference count.
1653 * * Once the reference count hits 0, it will never increase again.
1654 * * When the reference count hits 0, it means that no references to this
1655 *   socket exist from outside and the current process on the current CPU
1656 *   is the last user and may/should destroy this socket.
1657 * * sk_free may be called from any context: process, BH, IRQ. When it is
1658 *   called, the socket has no references from outside, so sk_free may
1659 *   release descendant resources allocated by the socket; by the time it
1660 *   is called, the socket is NOT referenced by any hash tables, lists etc.
1661 * * Packets delivered from outside (from the network or from another
1662 *   process) and enqueued on receive/error queues SHOULD NOT grab a
1663 *   reference count while they sit in a queue. Otherwise, packets would
1664 *   leak when a socket is looked up by one CPU while unhashing is done by
1665 *   another CPU. This is true for udp/raw and netlink (leaks via the
1666 *   receive and error queues) and tcp (leaks via the backlog). The packet
1667 *   socket does all its processing inside BR_NETPROTO_LOCK, so it does not
1668 *   have this race condition. UNIX sockets use a separate SMP lock, so
1669 *   they are protected as well.
1670 */
1671
1672/* Ungrab socket and destroy it, if it was the last reference. */
1673static inline void sock_put(struct sock *sk)
1674{
1675        if (atomic_dec_and_test(&sk->sk_refcnt))
1676                sk_free(sk);
1677}
1678/* Generic version of sock_put(), dealing with all sockets
1679 * (TCP_TIMEWAIT, ESTABLISHED...)
1680 */
1681void sock_gen_put(struct sock *sk);
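/*
 * Illustrative sketch (not part of the original header): per the
 * postulates above, an access point pins the socket with sock_hold()
 * for as long as it stores the pointer, and drops it with sock_put().
 * example_store() and example_drop() are hypothetical names.
 */
static inline void example_store(struct sock *sk, struct sock **slot)
{
        sock_hold(sk);          /* new access point -> take a reference */
        *slot = sk;
}

static inline void example_drop(struct sock **slot)
{
        struct sock *sk = *slot;

        *slot = NULL;
        sock_put(sk);           /* may be the final reference -> sk_free() */
}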
1682
1683int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
1684
1685static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1686{
1687        sk->sk_tx_queue_mapping = tx_queue;
1688}
1689
1690static inline void sk_tx_queue_clear(struct sock *sk)
1691{
1692        sk->sk_tx_queue_mapping = -1;
1693}
1694
1695static inline int sk_tx_queue_get(const struct sock *sk)
1696{
1697        return sk ? sk->sk_tx_queue_mapping : -1;
1698}
1699
1700static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1701{
1702        sk_tx_queue_clear(sk);
1703        sk->sk_socket = sock;
1704}
1705
1706static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1707{
1708        BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
1709        return &rcu_dereference_raw(sk->sk_wq)->wait;
1710}
1711/* Detach socket from process context.
1712 * Announce socket dead, detach it from wait queue and inode.
1713 * Note that the parent inode holds a reference count on this struct sock;
1714 * we do not release it in this function, because the protocol
1715 * probably wants to do some additional cleanup or even continue
1716 * working with this socket (TCP).
1717 */
1718static inline void sock_orphan(struct sock *sk)
1719{
1720        write_lock_bh(&sk->sk_callback_lock);
1721        sock_set_flag(sk, SOCK_DEAD);
1722        sk_set_socket(sk, NULL);
1723        sk->sk_wq  = NULL;
1724        write_unlock_bh(&sk->sk_callback_lock);
1725}
1726
1727static inline void sock_graft(struct sock *sk, struct socket *parent)
1728{
1729        write_lock_bh(&sk->sk_callback_lock);
1730        sk->sk_wq = parent->wq;
1731        parent->sk = sk;
1732        sk_set_socket(sk, parent);
1733        security_sock_graft(sk, parent);
1734        write_unlock_bh(&sk->sk_callback_lock);
1735}
1736
1737kuid_t sock_i_uid(struct sock *sk);
1738unsigned long sock_i_ino(struct sock *sk);
1739
1740static inline struct dst_entry *
1741__sk_dst_get(struct sock *sk)
1742{
1743        return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
1744                                                       lockdep_is_held(&sk->sk_lock.slock));
1745}
1746
1747static inline struct dst_entry *
1748sk_dst_get(struct sock *sk)
1749{
1750        struct dst_entry *dst;
1751
1752        rcu_read_lock();
1753        dst = rcu_dereference(sk->sk_dst_cache);
1754        if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1755                dst = NULL;
1756        rcu_read_unlock();
1757        return dst;
1758}
1759
1760static inline void dst_negative_advice(struct sock *sk)
1761{
1762        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1763
1764        if (dst && dst->ops->negative_advice) {
1765                ndst = dst->ops->negative_advice(dst);
1766
1767                if (ndst != dst) {
1768                        rcu_assign_pointer(sk->sk_dst_cache, ndst);
1769                        sk_tx_queue_clear(sk);
1770                }
1771        }
1772}
1773
1774static inline void
1775__sk_dst_set(struct sock *sk, struct dst_entry *dst)
1776{
1777        struct dst_entry *old_dst;
1778
1779        sk_tx_queue_clear(sk);
1780        /*
1781         * This can only be called while sk is owned by the caller,
1782         * with no state that can be checked in an rcu_dereference_check() cond
1783         */
1784        old_dst = rcu_dereference_raw(sk->sk_dst_cache);
1785        rcu_assign_pointer(sk->sk_dst_cache, dst);
1786        dst_release(old_dst);
1787}
1788
1789static inline void
1790sk_dst_set(struct sock *sk, struct dst_entry *dst)
1791{
1792        struct dst_entry *old_dst;
1793
1794        sk_tx_queue_clear(sk);
1795        old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1796        dst_release(old_dst);
1797}
1798
1799static inline void
1800__sk_dst_reset(struct sock *sk)
1801{
1802        __sk_dst_set(sk, NULL);
1803}
1804
1805static inline void
1806sk_dst_reset(struct sock *sk)
1807{
1808        sk_dst_set(sk, NULL);
1809}
1810
1811struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1812
1813struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
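/*
 * Illustrative sketch (not part of the original header): callers
 * typically validate the cached route against a cookie and re-route on
 * failure. example_dst() is a hypothetical name.
 */
static inline struct dst_entry *example_dst(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst = sk_dst_check(sk, cookie);

        if (!dst) {
                /* Cached entry was missing or obsolete; a real caller
                 * would perform a fresh route lookup here and install
                 * the result with sk_dst_set(sk, dst).
                 */
        }
        return dst;
}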
1814
1815static inline bool sk_can_gso(const struct sock *sk)
1816{
1817        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
1818}
1819
1820void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
1821
1822static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
1823{
1824        sk->sk_route_nocaps |= flags;
1825        sk->sk_route_caps &= ~flags;
1826}
1827
1828static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
1829                                           char __user *from, char *to,
1830                                           int copy, int offset)
1831{
1832        if (skb->ip_summed == CHECKSUM_NONE) {
1833                int err = 0;
1834                __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
1835                if (err)
1836                        return err;
1837                skb->csum = csum_block_add(skb->csum, csum, offset);
1838        } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
1839                if (!access_ok(VERIFY_READ, from, copy) ||
1840                    __copy_from_user_nocache(to, from, copy))
1841                        return -EFAULT;
1842        } else if (copy_from_user(to, from, copy))
1843                return -EFAULT;
1844
1845        return 0;
1846}
1847
1848static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
1849                                       char __user *from, int copy)
1850{
1851        int err, offset = skb->len;
1852
1853        err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
1854                                       copy, offset);
1855        if (err)
1856                __skb_trim(skb, offset);
1857
1858        return err;
1859}
1860
1861static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
1862                                           struct sk_buff *skb,
1863                                           struct page *page,
1864                                           int off, int copy)
1865{
1866        int err;
1867
1868        err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
1869                                       copy, skb->len);
1870        if (err)
1871                return err;
1872
1873        skb->len             += copy;
1874        skb->data_len        += copy;
1875        skb->truesize        += copy;
1876        sk->sk_wmem_queued   += copy;
1877        sk_mem_charge(sk, copy);
1878        return 0;
1879}
1880
1881/**
1882 * sk_wmem_alloc_get - returns write allocations
1883 * @sk: socket
1884 *
1885 * Returns sk_wmem_alloc minus initial offset of one
1886 */
1887static inline int sk_wmem_alloc_get(const struct sock *sk)
1888{
1889        return atomic_read(&sk->sk_wmem_alloc) - 1;
1890}
1891
1892/**
1893 * sk_rmem_alloc_get - returns read allocations
1894 * @sk: socket
1895 *
1896 * Returns sk_rmem_alloc
1897 */
1898static inline int sk_rmem_alloc_get(const struct sock *sk)
1899{
1900        return atomic_read(&sk->sk_rmem_alloc);
1901}
1902
1903/**
1904 * sk_has_allocations - check if allocations are outstanding
1905 * @sk: socket
1906 *
1907 * Returns true if socket has write or read allocations
1908 */
1909static inline bool sk_has_allocations(const struct sock *sk)
1910{
1911        return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
1912}
1913
1914/**
1915 * wq_has_sleeper - check if there are any waiting processes
1916 * @wq: struct socket_wq
1917 *
1918 * Returns true if socket_wq has waiting processes
1919 *
1920 * The purpose of wq_has_sleeper() and sock_poll_wait() is to wrap the memory
1921 * barrier call. They were added because of a race found within the tcp code.
1922 *
1923 * Consider following tcp code paths:
1924 *
1925 * CPU1                  CPU2
1926 *
1927 * sys_select            receive packet
1928 *   ...                 ...
1929 *   __add_wait_queue    update tp->rcv_nxt
1930 *   ...                 ...
1931 *   tp->rcv_nxt check   sock_def_readable
1932 *   ...                 {
1933 *   schedule               rcu_read_lock();
1934 *                          wq = rcu_dereference(sk->sk_wq);
1935 *                          if (wq && waitqueue_active(&wq->wait))
1936 *                              wake_up_interruptible(&wq->wait)
1937 *                          ...
1938 *                       }
1939 *
1940 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
1941 * in its cache, and so does the tp->rcv_nxt update on the CPU2 side.  CPU1
1942 * could then end up calling schedule and sleeping forever if there is no
1943 * more data on the socket.
1944 *
1945 */
1946static inline bool wq_has_sleeper(struct socket_wq *wq)
1947{
1948        /* We need to be sure we are in sync with the
1949         * add_wait_queue modifications to the wait queue.
1950         *
1951         * This memory barrier is paired in the sock_poll_wait.
1952         */
1953        smp_mb();
1954        return wq && waitqueue_active(&wq->wait);
1955}
1956
1957/**
1958 * sock_poll_wait - place memory barrier behind the poll_wait call.
1959 * @filp:           file
1960 * @wait_address:   socket wait queue
1961 * @p:              poll_table
1962 *
1963 * See the comments in the wq_has_sleeper function.
1964 */
1965static inline void sock_poll_wait(struct file *filp,
1966                wait_queue_head_t *wait_address, poll_table *p)
1967{
1968        if (!poll_does_not_wait(p) && wait_address) {
1969                poll_wait(filp, wait_address, p);
1970                /* We need to be sure we are in sync with the
1971                 * socket flags modification.
1972                 *
1973                 * This memory barrier is paired in the wq_has_sleeper.
1974                 */
1975                smp_mb();
1976        }
1977}
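/*
 * Illustrative sketch (not part of the original header): a wakeup
 * callback in the style of sock_def_readable(), pairing the barrier
 * inside wq_has_sleeper() with the one in sock_poll_wait().
 * example_data_ready() is a hypothetical name.
 */
static inline void example_data_ready(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))
                wake_up_interruptible_poll(&wq->wait, POLLIN);
        rcu_read_unlock();
}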
1978
1979static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
1980{
1981        if (sk->sk_txhash) {
1982                skb->l4_hash = 1;
1983                skb->hash = sk->sk_txhash;
1984        }
1985}
1986
1987/*
1988 *      Queue a received datagram if it will fit. Stream and sequenced
1989 *      protocols can't normally use this as they need to fit buffers in
1990 *      and play with them.
1991 *
1992 *      Inlined as it's very short and called for pretty much every
1993 *      packet ever received.
1994 */
1995
1996static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1997{
1998        skb_orphan(skb);
1999        skb->sk = sk;
2000        skb->destructor = sock_wfree;
2001        skb_set_hash_from_sk(skb, sk);
2002        /*
2003         * We used to take a refcount on sk, but the following operation
2004         * is enough to guarantee sk_free() won't free this sock until
2005         * all in-flight packets have completed.
2006         */
2007        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
2008}
2009
2010static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2011{
2012        skb_orphan(skb);
2013        skb->sk = sk;
2014        skb->destructor = sock_rfree;
2015        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2016        sk_mem_charge(sk, skb->truesize);
2017}
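/*
 * Illustrative sketch (not part of the original header), in the spirit
 * of sock_queue_rcv_skb(): check the receive buffer, charge it via
 * skb_set_owner_r(), then queue. Assumes the hypothetical caller holds
 * the receive queue lock.
 */
static inline int example_enqueue_rcv(struct sock *sk, struct sk_buff *skb)
{
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned int)sk->sk_rcvbuf)
                return -ENOMEM;

        skb_set_owner_r(skb, sk);       /* sets destructor, charges rmem */
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        return 0;
}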
2018
2019void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2020                    unsigned long expires);
2021
2022void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2023
2024int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2025
2026int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2027struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2028
2029/*
2030 *      Recover an error report and clear atomically
2031 */
2032
2033static inline int sock_error(struct sock *sk)
2034{
2035        int err;
2036        if (likely(!sk->sk_err))
2037                return 0;
2038        err = xchg(&sk->sk_err, 0);
2039        return -err;
2040}
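/*
 * Illustrative sketch (not part of the original header): receive paths
 * commonly consume a pending error before deciding whether to block.
 * example_recv_precheck() and its positive return value are hypothetical.
 */
static inline int example_recv_precheck(struct sock *sk)
{
        int err = sock_error(sk);       /* reads and clears sk_err atomically */

        if (err)
                return err;             /* negative errno, e.g. -ECONNRESET */
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return 0;               /* EOF */
        return 1;                       /* keep receiving */
}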
2041
2042static inline unsigned long sock_wspace(struct sock *sk)
2043{
2044        int amt = 0;
2045
2046        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2047                amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
2048                if (amt < 0)
2049                        amt = 0;
2050        }
2051        return amt;
2052}
2053
2054static inline void sk_wake_async(struct sock *sk, int how, int band)
2055{
2056        if (sock_flag(sk, SOCK_FASYNC))
2057                sock_wake_async(sk->sk_socket, how, band);
2058}
2059
2060/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
2061 * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
2062 * Note: for send buffers, TCP works better if we can build two skbs at
2063 * minimum.
2064 */
2065#define TCP_SKB_MIN_TRUESIZE    (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
2066
2067#define SOCK_MIN_SNDBUF         (TCP_SKB_MIN_TRUESIZE * 2)
2068#define SOCK_MIN_RCVBUF          TCP_SKB_MIN_TRUESIZE
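/*
 * Worked example (illustrative, assuming sizeof(struct sk_buff) rounds
 * up to 256 bytes under SKB_DATA_ALIGN): TCP_SKB_MIN_TRUESIZE would be
 * 2048 + 256 = 2304, giving SOCK_MIN_SNDBUF = 4608 and
 * SOCK_MIN_RCVBUF = 2304.
 */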
2069
2070static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2071{
2072        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
2073                sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2074                sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
2075        }
2076}
2077
2078struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
2079
2080/**
2081 * sk_page_frag - return an appropriate page_frag
2082 * @sk: socket
2083 *
2084 * If the socket allocation mode allows the current thread to sleep, it is
2085 * safe to use the per-task page_frag instead of the per-socket one.
2086 */
2087static inline struct page_frag *sk_page_frag(struct sock *sk)
2088{
2089        if (sk->sk_allocation & __GFP_WAIT)
2090                return &current->task_frag;
2091
2092        return &sk->sk_frag;
2093}
2094
2095bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
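/*
 * Illustrative sketch (not part of the original header): a sendmsg path
 * picks the appropriate page_frag and makes sure it has room before
 * copying user data into it. example_get_frag() is a hypothetical name.
 */
static inline struct page_frag *example_get_frag(struct sock *sk)
{
        struct page_frag *pfrag = sk_page_frag(sk);

        if (!sk_page_frag_refill(sk, pfrag))
                return NULL;    /* a real caller would wait for memory here */
        return pfrag;
}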
2096
2097/*
2098 *      Default write policy as shown to user space via poll/select/SIGIO
2099 */
2100static inline bool sock_writeable(const struct sock *sk)
2101{
2102        return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
2103}
2104
2105static inline gfp_t gfp_any(void)
2106{
2107        return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
2108}
2109
2110static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2111{
2112        return noblock ? 0 : sk->sk_rcvtimeo;
2113}
2114
2115static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2116{
2117        return noblock ? 0 : sk->sk_sndtimeo;
2118}
2119
2120static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2121{
2122        return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
2123}
2124
2125/* Alas, with a timeout, socket operations are not restartable.
2126 * Compare this to poll().
2127 */
2128static inline int sock_intr_errno(long timeo)
2129{
2130        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
2131}
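/*
 * Illustrative sketch (not part of the original header): a blocking
 * receive loop combining the timeout helpers above, assuming the socket
 * is already locked (as sk_wait_data() requires).
 * example_wait_for_data() is a hypothetical name.
 */
static inline int example_wait_for_data(struct sock *sk, int noblock)
{
        long timeo = sock_rcvtimeo(sk, noblock);

        while (skb_queue_empty(&sk->sk_receive_queue)) {
                if (!timeo)
                        return -EAGAIN;
                if (signal_pending(current))
                        return sock_intr_errno(timeo);
                sk_wait_data(sk, &timeo);
        }
        return 0;
}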
2132
2133void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2134                           struct sk_buff *skb);
2135void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2136                             struct sk_buff *skb);
2137
2138static inline void
2139sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2140{
2141        ktime_t kt = skb->tstamp;
2142        struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
2143
2144        /*
2145         * generate control messages if
2146         * - receive time stamping in software requested
2147         * - software time stamp available and wanted
2148         * - hardware time stamps available and wanted
2149         */
2150        if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2151            (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2152            (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2153            (hwtstamps->hwtstamp.tv64 &&
2154             (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2155                __sock_recv_timestamp(msg, sk, skb);
2156        else
2157                sk->sk_stamp = kt;
2158
2159        if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2160                __sock_recv_wifi_status(msg, sk, skb);
2161}
2162
2163void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2164                              struct sk_buff *skb);
2165
2166static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2167                                          struct sk_buff *skb)
2168{
2169#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)                       | \
2170                           (1UL << SOCK_RCVTSTAMP))
2171#define TSFLAGS_ANY       (SOF_TIMESTAMPING_SOFTWARE                    | \
2172                           SOF_TIMESTAMPING_RAW_HARDWARE)
2173
2174        if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
2175                __sock_recv_ts_and_drops(msg, sk, skb);
2176        else
2177                sk->sk_stamp = skb->tstamp;
2178}
2179
2180void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
2181
2182/**
2183 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2184 * @sk:         socket sending this packet
2185 * @tx_flags:   completed with instructions for time stamping
2186 *
2187 * Note: callers should take care of the initial *tx_flags value (usually 0)
2188 */
2189static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
2190{
2191        if (unlikely(sk->sk_tsflags))
2192                __sock_tx_timestamp(sk, tx_flags);
2193        if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2194                *tx_flags |= SKBTX_WIFI_STATUS;
2195}
2196
2197/**
2198 * sk_eat_skb - Release a skb if it is no longer needed
2199 * @sk: socket to eat this skb from
2200 * @skb: socket buffer to eat
2201 *
2202 * This routine must be called with interrupts disabled or with the socket
2203 * locked so that the sk_buff queue operation is safe.
2204 */
2205static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2206{
2207        __skb_unlink(skb, &sk->sk_receive_queue);
2208        __kfree_skb(skb);
2209}
2210
2211static inline
2212struct net *sock_net(const struct sock *sk)
2213{
2214        return read_pnet(&sk->sk_net);
2215}
2216
2217static inline
2218void sock_net_set(struct sock *sk, struct net *net)
2219{
2220        write_pnet(&sk->sk_net, net);
2221}
2222
2223/*
2224 * Kernel sockets, e.g. rtnl or icmp_socket, are part of a namespace.
2225 * They should not hold a reference to the namespace, so that the namespace
2226 * can be stopped.
2227 * After sk_change_net(), sockets should be released using sk_release_kernel().
2228 */
2229static inline void sk_change_net(struct sock *sk, struct net *net)
2230{
2231        struct net *current_net = sock_net(sk);
2232
2233        if (!net_eq(current_net, net)) {
2234                put_net(current_net);
2235                sock_net_set(sk, hold_net(net));
2236        }
2237}
2238
2239static inline struct sock *skb_steal_sock(struct sk_buff *skb)
2240{
2241        if (skb->sk) {
2242                struct sock *sk = skb->sk;
2243
2244                skb->destructor = NULL;
2245                skb->sk = NULL;
2246                return sk;
2247        }
2248        return NULL;
2249}
2250
2251void sock_enable_timestamp(struct sock *sk, int flag);
2252int sock_get_timestamp(struct sock *, struct timeval __user *);
2253int sock_get_timestampns(struct sock *, struct timespec __user *);
2254int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2255                       int type);
2256
2257bool sk_ns_capable(const struct sock *sk,
2258                   struct user_namespace *user_ns, int cap);
2259bool sk_capable(const struct sock *sk, int cap);
2260bool sk_net_capable(const struct sock *sk, int cap);
2261
2262extern __u32 sysctl_wmem_max;
2263extern __u32 sysctl_rmem_max;
2264
2265extern int sysctl_optmem_max;
2266
2267extern __u32 sysctl_wmem_default;
2268extern __u32 sysctl_rmem_default;
2269
2270#endif  /* _SOCK_H */
2271