linux/net/core/sock.c
   1/*
   2 * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3 *              operating system.  INET is implemented using the  BSD Socket
   4 *              interface as the means of communication with the user level.
   5 *
   6 *              Generic socket support routines. Memory allocators, socket lock/release
   7 *              handler for protocols to use and generic option handler.
   8 *
   9 *
  10 * Authors:     Ross Biro
  11 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *              Florian La Roche, <flla@stud.uni-sb.de>
  13 *              Alan Cox, <A.Cox@swansea.ac.uk>
  14 *
  15 * Fixes:
  16 *              Alan Cox        :       Numerous verify_area() problems
  17 *              Alan Cox        :       Connecting on a connecting socket
  18 *                                      now returns an error for tcp.
  19 *              Alan Cox        :       sock->protocol is set correctly.
  20 *                                      and is not sometimes left as 0.
  21 *              Alan Cox        :       connect handles icmp errors on a
  22 *                                      connect properly. Unfortunately there
  23 *                                      is a restart syscall nasty there. I
  24 *                                      can't match BSD without hacking the C
  25 *                                      library. Ideas urgently sought!
  26 *              Alan Cox        :       Disallow bind() to addresses that are
  27 *                                      not ours - especially broadcast ones!!
  28 *              Alan Cox        :       Socket 1024 _IS_ ok for users. (fencepost)
  29 *              Alan Cox        :       sock_wfree/sock_rfree don't destroy sockets,
  30 *                                      instead they leave that for the DESTROY timer.
  31 *              Alan Cox        :       Clean up error flag in accept
  32 *              Alan Cox        :       TCP ack handling is buggy, the DESTROY timer
  33 *                                      was buggy. Put a remove_sock() in the handler
  34 *                                      for memory when we hit 0. Also altered the timer
  35 *                                      code. The ACK stuff can wait and needs major
  36 *                                      TCP layer surgery.
  37 *              Alan Cox        :       Fixed TCP ack bug, removed remove sock
  38 *                                      and fixed timer/inet_bh race.
  39 *              Alan Cox        :       Added zapped flag for TCP
  40 *              Alan Cox        :       Move kfree_skb into skbuff.c and tidied up surplus code
  41 *              Alan Cox        :       for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
  42 *              Alan Cox        :       kfree_s calls now are kfree_skbmem so we can track skb resources
  43 *              Alan Cox        :       Supports socket option broadcast now as does udp. Packet and raw need fixing.
  44 *              Alan Cox        :       Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
  45 *              Rick Sladkey    :       Relaxed UDP rules for matching packets.
  46 *              C.E.Hawkins     :       IFF_PROMISC/SIOCGHWADDR support
  47 *      Pauline Middelink       :       identd support
  48 *              Alan Cox        :       Fixed connect() taking signals I think.
  49 *              Alan Cox        :       SO_LINGER supported
  50 *              Alan Cox        :       Error reporting fixes
  51 *              Anonymous       :       inet_create tidied up (sk->reuse setting)
  52 *              Alan Cox        :       inet sockets don't set sk->type!
  53 *              Alan Cox        :       Split socket option code
  54 *              Alan Cox        :       Callbacks
  55 *              Alan Cox        :       Nagle flag for Charles & Johannes stuff
  56 *              Alex            :       Removed restriction on inet fioctl
  57 *              Alan Cox        :       Splitting INET from NET core
  58 *              Alan Cox        :       Fixed bogus SO_TYPE handling in getsockopt()
  59 *              Adam Caldwell   :       Missing return in SO_DONTROUTE/SO_DEBUG code
  60 *              Alan Cox        :       Split IP from generic code
  61 *              Alan Cox        :       New kfree_skbmem()
  62 *              Alan Cox        :       Make SO_DEBUG superuser only.
  63 *              Alan Cox        :       Allow anyone to clear SO_DEBUG
  64 *                                      (compatibility fix)
  65 *              Alan Cox        :       Added optimistic memory grabbing for AF_UNIX throughput.
  66 *              Alan Cox        :       Allocator for a socket is settable.
  67 *              Alan Cox        :       SO_ERROR includes soft errors.
  68 *              Alan Cox        :       Allow NULL arguments on some SO_ opts
  69 *              Alan Cox        :       Generic socket allocation to make hooks
  70 *                                      easier (suggested by Craig Metz).
  71 *              Michael Pall    :       SO_ERROR returns positive errno again
  72 *              Steve Whitehouse:       Added default destructor to free
  73 *                                      protocol private data.
  74 *              Steve Whitehouse:       Added various other default routines
  75 *                                      common to several socket families.
  76 *              Chris Evans     :       Call suser() check last on F_SETOWN
  77 *              Jay Schulist    :       Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
  78 *              Andi Kleen      :       Add sock_kmalloc()/sock_kfree_s()
  79 *              Andi Kleen      :       Fix write_space callback
  80 *              Chris Evans     :       Security fixes - signedness again
  81 *              Arnaldo C. Melo :       cleanups, use skb_queue_purge
  82 *
  83 * To Fix:
  84 *
  85 *
  86 *              This program is free software; you can redistribute it and/or
  87 *              modify it under the terms of the GNU General Public License
  88 *              as published by the Free Software Foundation; either version
  89 *              2 of the License, or (at your option) any later version.
  90 */
  91
  92#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  93
  94#include <linux/capability.h>
  95#include <linux/errno.h>
  96#include <linux/errqueue.h>
  97#include <linux/types.h>
  98#include <linux/socket.h>
  99#include <linux/in.h>
 100#include <linux/kernel.h>
 101#include <linux/module.h>
 102#include <linux/proc_fs.h>
 103#include <linux/seq_file.h>
 104#include <linux/sched.h>
 105#include <linux/timer.h>
 106#include <linux/string.h>
 107#include <linux/sockios.h>
 108#include <linux/net.h>
 109#include <linux/mm.h>
 110#include <linux/slab.h>
 111#include <linux/interrupt.h>
 112#include <linux/poll.h>
 113#include <linux/tcp.h>
 114#include <linux/init.h>
 115#include <linux/highmem.h>
 116#include <linux/user_namespace.h>
 117#include <linux/static_key.h>
 118#include <linux/memcontrol.h>
 119#include <linux/prefetch.h>
 120
 121#include <asm/uaccess.h>
 122
 123#include <linux/netdevice.h>
 124#include <net/protocol.h>
 125#include <linux/skbuff.h>
 126#include <net/net_namespace.h>
 127#include <net/request_sock.h>
 128#include <net/sock.h>
 129#include <linux/net_tstamp.h>
 130#include <net/xfrm.h>
 131#include <linux/ipsec.h>
 132#include <net/cls_cgroup.h>
 133#include <net/netprio_cgroup.h>
 134#include <linux/sock_diag.h>
 135
 136#include <linux/filter.h>
 137#include <net/sock_reuseport.h>
 138
 139#include <trace/events/sock.h>
 140
 141#ifdef CONFIG_INET
 142#include <net/tcp.h>
 143#endif
 144
 145#include <net/busy_poll.h>
 146
 147static DEFINE_MUTEX(proto_list_mutex);
 148static LIST_HEAD(proto_list);
 149
 150/**
 151 * sk_ns_capable - General socket capability test
 152 * @sk: Socket to use a capability on or through
 153 * @user_ns: The user namespace of the capability to use
 154 * @cap: The capability to use
 155 *
  156 * Test to see if the opener of the socket had the capability @cap in the
  157 * user namespace @user_ns when the socket was created, and whether the
  158 * current process has it as well.
 159 */
 160bool sk_ns_capable(const struct sock *sk,
 161                   struct user_namespace *user_ns, int cap)
 162{
 163        return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
 164                ns_capable(user_ns, cap);
 165}
 166EXPORT_SYMBOL(sk_ns_capable);
 167
 168/**
 169 * sk_capable - Socket global capability test
 170 * @sk: Socket to use a capability on or through
 171 * @cap: The global capability to use
 172 *
  173 * Test to see if the opener of the socket had the capability @cap in all
  174 * user namespaces when the socket was created, and whether the current
  175 * process has it as well.
 176 */
 177bool sk_capable(const struct sock *sk, int cap)
 178{
 179        return sk_ns_capable(sk, &init_user_ns, cap);
 180}
 181EXPORT_SYMBOL(sk_capable);
 182
 183/**
 184 * sk_net_capable - Network namespace socket capability test
 185 * @sk: Socket to use a capability on or through
 186 * @cap: The capability to use
 187 *
  188 * Test to see if the opener of the socket had the capability @cap over the
  189 * network namespace the socket is a member of when the socket was created,
  190 * and whether the current process has it as well.
 191 */
 192bool sk_net_capable(const struct sock *sk, int cap)
 193{
 194        return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
 195}
 196EXPORT_SYMBOL(sk_net_capable);
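
     /*
      * Illustrative sketch (hypothetical caller): a protocol that wants
      * "CAP_NET_ADMIN over the socket's own network namespace" semantics for
      * a privileged option could gate it on sk_net_capable(); the function
      * name myproto_set_priority() is made up:
      *
      *        static int myproto_set_priority(struct sock *sk, int val)
      *        {
      *                if (!sk_net_capable(sk, CAP_NET_ADMIN))
      *                        return -EPERM;
      *                sk->sk_priority = val;
      *                return 0;
      *        }
      */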
 197
 198/*
 199 * Each address family might have different locking rules, so we have
 200 * one slock key per address family:
 201 */
 202static struct lock_class_key af_family_keys[AF_MAX];
 203static struct lock_class_key af_family_slock_keys[AF_MAX];
 204
 205/*
 206 * Make lock validator output more readable. (we pre-construct these
 207 * strings build-time, so that runtime initialization of socket
 208 * locks is fast):
 209 */
 210static const char *const af_family_key_strings[AF_MAX+1] = {
 211  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
 212  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
 213  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
 214  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
 215  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
 216  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
 217  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
 218  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
 219  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
 220  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
 221  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
 222  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
 223  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
 224  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
 225  "sk_lock-AF_MAX"
 226};
 227static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 228  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
 229  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
 230  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
 231  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
 232  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
 233  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
 234  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
 235  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
 236  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
 237  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
 238  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
 239  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
 240  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
 241  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
 242  "slock-AF_MAX"
 243};
 244static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 245  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
 246  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
 247  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
 248  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
 249  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
 250  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
 251  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
 252  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
 253  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
 254  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
 255  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
 256  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
 257  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
 258  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
 259  "clock-AF_MAX"
 260};
 261
 262/*
 263 * sk_callback_lock locking rules are per-address-family,
 264 * so split the lock classes by using a per-AF key:
 265 */
 266static struct lock_class_key af_callback_keys[AF_MAX];
 267
 268/* Take into consideration the size of the struct sk_buff overhead in the
 269 * determination of these values, since that is non-constant across
 270 * platforms.  This makes socket queueing behavior and performance
 271 * not depend upon such differences.
 272 */
 273#define _SK_MEM_PACKETS         256
 274#define _SK_MEM_OVERHEAD        SKB_TRUESIZE(256)
 275#define SK_WMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
 276#define SK_RMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
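
     /* Rough illustration of the arithmetic: SKB_TRUESIZE(256) is the 256-byte
      * payload plus the aligned sizes of struct sk_buff and struct
      * skb_shared_info, so the defaults expand to
      *
      *        SK_WMEM_MAX = SK_RMEM_MAX = SKB_TRUESIZE(256) * 256
      *
      * which comes to roughly 200 KB per socket on a common 64-bit build; the
      * exact figure varies with the platform's struct sizes, which is
      * precisely why it is expressed this way.
      */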
 277
 278/* Run time adjustable parameters. */
 279__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
 280EXPORT_SYMBOL(sysctl_wmem_max);
 281__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 282EXPORT_SYMBOL(sysctl_rmem_max);
 283__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 284__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 285
 286/* Maximal space eaten by iovec or ancillary data plus some space */
 287int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 288EXPORT_SYMBOL(sysctl_optmem_max);
 289
 290int sysctl_tstamp_allow_data __read_mostly = 1;
 291
 292struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
 293EXPORT_SYMBOL_GPL(memalloc_socks);
 294
 295/**
 296 * sk_set_memalloc - sets %SOCK_MEMALLOC
 297 * @sk: socket to set it on
 298 *
 299 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 300 * It's the responsibility of the admin to adjust min_free_kbytes
  301 * to meet the requirements.
 302 */
 303void sk_set_memalloc(struct sock *sk)
 304{
 305        sock_set_flag(sk, SOCK_MEMALLOC);
 306        sk->sk_allocation |= __GFP_MEMALLOC;
 307        static_key_slow_inc(&memalloc_socks);
 308}
 309EXPORT_SYMBOL_GPL(sk_set_memalloc);
 310
 311void sk_clear_memalloc(struct sock *sk)
 312{
 313        sock_reset_flag(sk, SOCK_MEMALLOC);
 314        sk->sk_allocation &= ~__GFP_MEMALLOC;
 315        static_key_slow_dec(&memalloc_socks);
 316
 317        /*
 318         * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
 319         * progress of swapping. SOCK_MEMALLOC may be cleared while
 320         * it has rmem allocations due to the last swapfile being deactivated
 321         * but there is a risk that the socket is unusable due to exceeding
 322         * the rmem limits. Reclaim the reserves and obey rmem limits again.
 323         */
 324        sk_mem_reclaim(sk);
 325}
 326EXPORT_SYMBOL_GPL(sk_clear_memalloc);
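
     /*
      * Illustrative sketch (hypothetical caller): a network storage transport
      * whose socket must keep making progress while the machine is swapping
      * over it would bracket that socket's lifetime with these helpers; both
      * function names below are made up:
      *
      *        static void my_swap_transport_start(struct sock *sk)
      *        {
      *                sk_set_memalloc(sk);
      *        }
      *
      *        static void my_swap_transport_stop(struct sock *sk)
      *        {
      *                sk_clear_memalloc(sk);
      *        }
      */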
 327
 328int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 329{
 330        int ret;
 331        unsigned long pflags = current->flags;
 332
 333        /* these should have been dropped before queueing */
 334        BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
 335
 336        current->flags |= PF_MEMALLOC;
 337        ret = sk->sk_backlog_rcv(sk, skb);
 338        tsk_restore_flags(current, pflags, PF_MEMALLOC);
 339
 340        return ret;
 341}
 342EXPORT_SYMBOL(__sk_backlog_rcv);
 343
 344static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
 345{
 346        struct timeval tv;
 347
 348        if (optlen < sizeof(tv))
 349                return -EINVAL;
 350        if (copy_from_user(&tv, optval, sizeof(tv)))
 351                return -EFAULT;
 352        if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
 353                return -EDOM;
 354
 355        if (tv.tv_sec < 0) {
 356                static int warned __read_mostly;
 357
 358                *timeo_p = 0;
 359                if (warned < 10 && net_ratelimit()) {
 360                        warned++;
 361                        pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
 362                                __func__, current->comm, task_pid_nr(current));
 363                }
 364                return 0;
 365        }
 366        *timeo_p = MAX_SCHEDULE_TIMEOUT;
 367        if (tv.tv_sec == 0 && tv.tv_usec == 0)
 368                return 0;
 369        if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
 370                *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
 371        return 0;
 372}
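
     /*
      * Worked example (assuming HZ == 1000 purely for illustration): a user
      * timeout of { .tv_sec = 2, .tv_usec = 500000 } becomes
      *
      *        2 * HZ + (500000 + 999) / 1000 = 2500 jiffies
      *
      * and any sub-tick remainder rounds up, so { 0, 1 } becomes one jiffy
      * rather than zero, which here would mean not waiting at all.
      */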
 373
 374static void sock_warn_obsolete_bsdism(const char *name)
 375{
 376        static int warned;
 377        static char warncomm[TASK_COMM_LEN];
 378        if (strcmp(warncomm, current->comm) && warned < 5) {
 379                strcpy(warncomm,  current->comm);
 380                pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
 381                        warncomm, name);
 382                warned++;
 383        }
 384}
 385
 386static bool sock_needs_netstamp(const struct sock *sk)
 387{
 388        switch (sk->sk_family) {
 389        case AF_UNSPEC:
 390        case AF_UNIX:
 391                return false;
 392        default:
 393                return true;
 394        }
 395}
 396
 397static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
 398{
 399        if (sk->sk_flags & flags) {
 400                sk->sk_flags &= ~flags;
 401                if (sock_needs_netstamp(sk) &&
 402                    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
 403                        net_disable_timestamp();
 404        }
 405}
 406
 407
 408int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 409{
 410        unsigned long flags;
 411        struct sk_buff_head *list = &sk->sk_receive_queue;
 412
 413        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
 414                atomic_inc(&sk->sk_drops);
 415                trace_sock_rcvqueue_full(sk, skb);
 416                return -ENOMEM;
 417        }
 418
 419        if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 420                atomic_inc(&sk->sk_drops);
 421                return -ENOBUFS;
 422        }
 423
 424        skb->dev = NULL;
 425        skb_set_owner_r(skb, sk);
 426
  427        /* We escape from the RCU-protected region here, so make sure we
  428         * don't leak a non-refcounted dst.
  429         */
 430        skb_dst_force(skb);
 431
 432        spin_lock_irqsave(&list->lock, flags);
 433        sock_skb_set_dropcount(sk, skb);
 434        __skb_queue_tail(list, skb);
 435        spin_unlock_irqrestore(&list->lock, flags);
 436
 437        if (!sock_flag(sk, SOCK_DEAD))
 438                sk->sk_data_ready(sk);
 439        return 0;
 440}
 441EXPORT_SYMBOL(__sock_queue_rcv_skb);
 442
 443int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 444{
 445        int err;
 446
 447        err = sk_filter(sk, skb);
 448        if (err)
 449                return err;
 450
 451        return __sock_queue_rcv_skb(sk, skb);
 452}
 453EXPORT_SYMBOL(sock_queue_rcv_skb);
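
     /*
      * Illustrative sketch (hypothetical protocol receive path): a caller
      * queues the skb toward the owning socket and drops it itself when the
      * socket filter or the receive queue rejects it; myproto_deliver() is a
      * made-up name:
      *
      *        static int myproto_deliver(struct sock *sk, struct sk_buff *skb)
      *        {
      *                int err = sock_queue_rcv_skb(sk, skb);
      *
      *                if (err < 0) {
      *                        kfree_skb(skb);
      *                        return NET_RX_DROP;
      *                }
      *                return NET_RX_SUCCESS;
      *        }
      */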
 454
 455int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
 456                     const int nested, unsigned int trim_cap)
 457{
 458        int rc = NET_RX_SUCCESS;
 459
 460        if (sk_filter_trim_cap(sk, skb, trim_cap))
 461                goto discard_and_relse;
 462
 463        skb->dev = NULL;
 464
 465        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
 466                atomic_inc(&sk->sk_drops);
 467                goto discard_and_relse;
 468        }
 469        if (nested)
 470                bh_lock_sock_nested(sk);
 471        else
 472                bh_lock_sock(sk);
 473        if (!sock_owned_by_user(sk)) {
 474                /*
 475                 * trylock + unlock semantics:
 476                 */
 477                mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 478
 479                rc = sk_backlog_rcv(sk, skb);
 480
 481                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 482        } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 483                bh_unlock_sock(sk);
 484                atomic_inc(&sk->sk_drops);
 485                goto discard_and_relse;
 486        }
 487
 488        bh_unlock_sock(sk);
 489out:
 490        sock_put(sk);
 491        return rc;
 492discard_and_relse:
 493        kfree_skb(skb);
 494        goto out;
 495}
 496EXPORT_SYMBOL(__sk_receive_skb);
 497
 498struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 499{
 500        struct dst_entry *dst = __sk_dst_get(sk);
 501
 502        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 503                sk_tx_queue_clear(sk);
 504                RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 505                dst_release(dst);
 506                return NULL;
 507        }
 508
 509        return dst;
 510}
 511EXPORT_SYMBOL(__sk_dst_check);
 512
 513struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
 514{
 515        struct dst_entry *dst = sk_dst_get(sk);
 516
 517        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 518                sk_dst_reset(sk);
 519                dst_release(dst);
 520                return NULL;
 521        }
 522
 523        return dst;
 524}
 525EXPORT_SYMBOL(sk_dst_check);
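
     /*
      * Illustrative sketch (hypothetical transmit path): users of the cached
      * route revalidate it before each use and fall back to a fresh lookup
      * when it has been obsoleted; my_route_output() is a stand-in for the
      * protocol's routing call:
      *
      *        struct dst_entry *dst = sk_dst_check(sk, 0);
      *
      *        if (!dst) {
      *                dst = my_route_output(sk);
      *                if (IS_ERR(dst))
      *                        return PTR_ERR(dst);
      *                sk_dst_set(sk, dst);
      *        }
      */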
 526
 527static int sock_setbindtodevice(struct sock *sk, char __user *optval,
 528                                int optlen)
 529{
 530        int ret = -ENOPROTOOPT;
 531#ifdef CONFIG_NETDEVICES
 532        struct net *net = sock_net(sk);
 533        char devname[IFNAMSIZ];
 534        int index;
 535
 536        /* Sorry... */
 537        ret = -EPERM;
 538        if (!ns_capable(net->user_ns, CAP_NET_RAW))
 539                goto out;
 540
 541        ret = -EINVAL;
 542        if (optlen < 0)
 543                goto out;
 544
 545        /* Bind this socket to a particular device like "eth0",
 546         * as specified in the passed interface name. If the
 547         * name is "" or the option length is zero the socket
 548         * is not bound.
 549         */
 550        if (optlen > IFNAMSIZ - 1)
 551                optlen = IFNAMSIZ - 1;
 552        memset(devname, 0, sizeof(devname));
 553
 554        ret = -EFAULT;
 555        if (copy_from_user(devname, optval, optlen))
 556                goto out;
 557
 558        index = 0;
 559        if (devname[0] != '\0') {
 560                struct net_device *dev;
 561
 562                rcu_read_lock();
 563                dev = dev_get_by_name_rcu(net, devname);
 564                if (dev)
 565                        index = dev->ifindex;
 566                rcu_read_unlock();
 567                ret = -ENODEV;
 568                if (!dev)
 569                        goto out;
 570        }
 571
 572        lock_sock(sk);
 573        sk->sk_bound_dev_if = index;
 574        sk_dst_reset(sk);
 575        release_sock(sk);
 576
 577        ret = 0;
 578
 579out:
 580#endif
 581
 582        return ret;
 583}
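
     /*
      * Illustrative sketch (user-space usage, assuming <sys/socket.h>,
      * <string.h> and <stdio.h>): an application binds a socket to one
      * interface by name; "eth0" is just an example device, and the caller
      * needs CAP_NET_RAW as checked above:
      *
      *        int fd = socket(AF_INET, SOCK_DGRAM, 0);
      *
      *        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
      *                       "eth0", strlen("eth0") + 1) < 0)
      *                perror("SO_BINDTODEVICE");
      */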
 584
 585static int sock_getbindtodevice(struct sock *sk, char __user *optval,
 586                                int __user *optlen, int len)
 587{
 588        int ret = -ENOPROTOOPT;
 589#ifdef CONFIG_NETDEVICES
 590        struct net *net = sock_net(sk);
 591        char devname[IFNAMSIZ];
 592
 593        if (sk->sk_bound_dev_if == 0) {
 594                len = 0;
 595                goto zero;
 596        }
 597
 598        ret = -EINVAL;
 599        if (len < IFNAMSIZ)
 600                goto out;
 601
 602        ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
 603        if (ret)
 604                goto out;
 605
 606        len = strlen(devname) + 1;
 607
 608        ret = -EFAULT;
 609        if (copy_to_user(optval, devname, len))
 610                goto out;
 611
 612zero:
 613        ret = -EFAULT;
 614        if (put_user(len, optlen))
 615                goto out;
 616
 617        ret = 0;
 618
 619out:
 620#endif
 621
 622        return ret;
 623}
 624
 625static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
 626{
 627        if (valbool)
 628                sock_set_flag(sk, bit);
 629        else
 630                sock_reset_flag(sk, bit);
 631}
 632
 633bool sk_mc_loop(struct sock *sk)
 634{
 635        if (dev_recursion_level())
 636                return false;
 637        if (!sk)
 638                return true;
 639        switch (sk->sk_family) {
 640        case AF_INET:
 641                return inet_sk(sk)->mc_loop;
 642#if IS_ENABLED(CONFIG_IPV6)
 643        case AF_INET6:
 644                return inet6_sk(sk)->mc_loop;
 645#endif
 646        }
 647        WARN_ON(1);
 648        return true;
 649}
 650EXPORT_SYMBOL(sk_mc_loop);
 651
 652/*
 653 *      This is meant for all protocols to use and covers goings on
 654 *      at the socket level. Everything here is generic.
 655 */
 656
 657int sock_setsockopt(struct socket *sock, int level, int optname,
 658                    char __user *optval, unsigned int optlen)
 659{
 660        struct sock *sk = sock->sk;
 661        int val;
 662        int valbool;
 663        struct linger ling;
 664        int ret = 0;
 665
 666        /*
 667         *      Options without arguments
 668         */
 669
 670        if (optname == SO_BINDTODEVICE)
 671                return sock_setbindtodevice(sk, optval, optlen);
 672
 673        if (optlen < sizeof(int))
 674                return -EINVAL;
 675
 676        if (get_user(val, (int __user *)optval))
 677                return -EFAULT;
 678
 679        valbool = val ? 1 : 0;
 680
 681        lock_sock(sk);
 682
 683        switch (optname) {
 684        case SO_DEBUG:
 685                if (val && !capable(CAP_NET_ADMIN))
 686                        ret = -EACCES;
 687                else
 688                        sock_valbool_flag(sk, SOCK_DBG, valbool);
 689                break;
 690        case SO_REUSEADDR:
 691                sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
 692                break;
 693        case SO_REUSEPORT:
 694                sk->sk_reuseport = valbool;
 695                break;
 696        case SO_TYPE:
 697        case SO_PROTOCOL:
 698        case SO_DOMAIN:
 699        case SO_ERROR:
 700                ret = -ENOPROTOOPT;
 701                break;
 702        case SO_DONTROUTE:
 703                sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
 704                break;
 705        case SO_BROADCAST:
 706                sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
 707                break;
 708        case SO_SNDBUF:
  709                /* Don't error on this; BSD doesn't, and if you think
  710                 * about it this is right. Otherwise apps have to
  711                 * play 'guess the biggest size' games. RCVBUF/SNDBUF
  712                 * are treated in BSD as hints.
 713                 */
 714                val = min_t(u32, val, sysctl_wmem_max);
 715set_sndbuf:
 716                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 717                sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
 718                /* Wake up sending tasks if we upped the value. */
 719                sk->sk_write_space(sk);
 720                break;
 721
 722        case SO_SNDBUFFORCE:
 723                if (!capable(CAP_NET_ADMIN)) {
 724                        ret = -EPERM;
 725                        break;
 726                }
 727                goto set_sndbuf;
 728
 729        case SO_RCVBUF:
  730                /* Don't error on this; BSD doesn't, and if you think
  731                 * about it this is right. Otherwise apps have to
  732                 * play 'guess the biggest size' games. RCVBUF/SNDBUF
  733                 * are treated in BSD as hints.
 734                 */
 735                val = min_t(u32, val, sysctl_rmem_max);
 736set_rcvbuf:
 737                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 738                /*
 739                 * We double it on the way in to account for
 740                 * "struct sk_buff" etc. overhead.   Applications
 741                 * assume that the SO_RCVBUF setting they make will
 742                 * allow that much actual data to be received on that
 743                 * socket.
 744                 *
 745                 * Applications are unaware that "struct sk_buff" and
 746                 * other overheads allocate from the receive buffer
 747                 * during socket buffer allocation.
 748                 *
 749                 * And after considering the possible alternatives,
 750                 * returning the value we actually used in getsockopt
 751                 * is the most desirable behavior.
 752                 */
 753                sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
 754                break;
 755
 756        case SO_RCVBUFFORCE:
 757                if (!capable(CAP_NET_ADMIN)) {
 758                        ret = -EPERM;
 759                        break;
 760                }
 761                goto set_rcvbuf;
 762
 763        case SO_KEEPALIVE:
 764#ifdef CONFIG_INET
 765                if (sk->sk_protocol == IPPROTO_TCP &&
 766                    sk->sk_type == SOCK_STREAM)
 767                        tcp_set_keepalive(sk, valbool);
 768#endif
 769                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
 770                break;
 771
 772        case SO_OOBINLINE:
 773                sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
 774                break;
 775
 776        case SO_NO_CHECK:
 777                sk->sk_no_check_tx = valbool;
 778                break;
 779
 780        case SO_PRIORITY:
 781                if ((val >= 0 && val <= 6) ||
 782                    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 783                        sk->sk_priority = val;
 784                else
 785                        ret = -EPERM;
 786                break;
 787
 788        case SO_LINGER:
 789                if (optlen < sizeof(ling)) {
 790                        ret = -EINVAL;  /* 1003.1g */
 791                        break;
 792                }
 793                if (copy_from_user(&ling, optval, sizeof(ling))) {
 794                        ret = -EFAULT;
 795                        break;
 796                }
 797                if (!ling.l_onoff)
 798                        sock_reset_flag(sk, SOCK_LINGER);
 799                else {
 800#if (BITS_PER_LONG == 32)
 801                        if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
 802                                sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
 803                        else
 804#endif
 805                                sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
 806                        sock_set_flag(sk, SOCK_LINGER);
 807                }
 808                break;
 809
 810        case SO_BSDCOMPAT:
 811                sock_warn_obsolete_bsdism("setsockopt");
 812                break;
 813
 814        case SO_PASSCRED:
 815                if (valbool)
 816                        set_bit(SOCK_PASSCRED, &sock->flags);
 817                else
 818                        clear_bit(SOCK_PASSCRED, &sock->flags);
 819                break;
 820
 821        case SO_TIMESTAMP:
 822        case SO_TIMESTAMPNS:
 823                if (valbool)  {
 824                        if (optname == SO_TIMESTAMP)
 825                                sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 826                        else
 827                                sock_set_flag(sk, SOCK_RCVTSTAMPNS);
 828                        sock_set_flag(sk, SOCK_RCVTSTAMP);
 829                        sock_enable_timestamp(sk, SOCK_TIMESTAMP);
 830                } else {
 831                        sock_reset_flag(sk, SOCK_RCVTSTAMP);
 832                        sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
 833                }
 834                break;
 835
 836        case SO_TIMESTAMPING:
 837                if (val & ~SOF_TIMESTAMPING_MASK) {
 838                        ret = -EINVAL;
 839                        break;
 840                }
 841
 842                if (val & SOF_TIMESTAMPING_OPT_ID &&
 843                    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
 844                        if (sk->sk_protocol == IPPROTO_TCP &&
 845                            sk->sk_type == SOCK_STREAM) {
 846                                if ((1 << sk->sk_state) &
 847                                    (TCPF_CLOSE | TCPF_LISTEN)) {
 848                                        ret = -EINVAL;
 849                                        break;
 850                                }
 851                                sk->sk_tskey = tcp_sk(sk)->snd_una;
 852                        } else {
 853                                sk->sk_tskey = 0;
 854                        }
 855                }
 856                sk->sk_tsflags = val;
 857                if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
 858                        sock_enable_timestamp(sk,
 859                                              SOCK_TIMESTAMPING_RX_SOFTWARE);
 860                else
 861                        sock_disable_timestamp(sk,
 862                                               (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
 863                break;
 864
 865        case SO_RCVLOWAT:
 866                if (val < 0)
 867                        val = INT_MAX;
 868                sk->sk_rcvlowat = val ? : 1;
 869                break;
 870
 871        case SO_RCVTIMEO:
 872                ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
 873                break;
 874
 875        case SO_SNDTIMEO:
 876                ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
 877                break;
 878
 879        case SO_ATTACH_FILTER:
 880                ret = -EINVAL;
 881                if (optlen == sizeof(struct sock_fprog)) {
 882                        struct sock_fprog fprog;
 883
 884                        ret = -EFAULT;
 885                        if (copy_from_user(&fprog, optval, sizeof(fprog)))
 886                                break;
 887
 888                        ret = sk_attach_filter(&fprog, sk);
 889                }
 890                break;
 891
 892        case SO_ATTACH_BPF:
 893                ret = -EINVAL;
 894                if (optlen == sizeof(u32)) {
 895                        u32 ufd;
 896
 897                        ret = -EFAULT;
 898                        if (copy_from_user(&ufd, optval, sizeof(ufd)))
 899                                break;
 900
 901                        ret = sk_attach_bpf(ufd, sk);
 902                }
 903                break;
 904
 905        case SO_ATTACH_REUSEPORT_CBPF:
 906                ret = -EINVAL;
 907                if (optlen == sizeof(struct sock_fprog)) {
 908                        struct sock_fprog fprog;
 909
 910                        ret = -EFAULT;
 911                        if (copy_from_user(&fprog, optval, sizeof(fprog)))
 912                                break;
 913
 914                        ret = sk_reuseport_attach_filter(&fprog, sk);
 915                }
 916                break;
 917
 918        case SO_ATTACH_REUSEPORT_EBPF:
 919                ret = -EINVAL;
 920                if (optlen == sizeof(u32)) {
 921                        u32 ufd;
 922
 923                        ret = -EFAULT;
 924                        if (copy_from_user(&ufd, optval, sizeof(ufd)))
 925                                break;
 926
 927                        ret = sk_reuseport_attach_bpf(ufd, sk);
 928                }
 929                break;
 930
 931        case SO_DETACH_FILTER:
 932                ret = sk_detach_filter(sk);
 933                break;
 934
 935        case SO_LOCK_FILTER:
 936                if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
 937                        ret = -EPERM;
 938                else
 939                        sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
 940                break;
 941
 942        case SO_PASSSEC:
 943                if (valbool)
 944                        set_bit(SOCK_PASSSEC, &sock->flags);
 945                else
 946                        clear_bit(SOCK_PASSSEC, &sock->flags);
 947                break;
 948        case SO_MARK:
 949                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 950                        ret = -EPERM;
 951                else
 952                        sk->sk_mark = val;
 953                break;
 954
 955        case SO_RXQ_OVFL:
 956                sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
 957                break;
 958
 959        case SO_WIFI_STATUS:
 960                sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
 961                break;
 962
 963        case SO_PEEK_OFF:
 964                if (sock->ops->set_peek_off)
 965                        ret = sock->ops->set_peek_off(sk, val);
 966                else
 967                        ret = -EOPNOTSUPP;
 968                break;
 969
 970        case SO_NOFCS:
 971                sock_valbool_flag(sk, SOCK_NOFCS, valbool);
 972                break;
 973
 974        case SO_SELECT_ERR_QUEUE:
 975                sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
 976                break;
 977
 978#ifdef CONFIG_NET_RX_BUSY_POLL
 979        case SO_BUSY_POLL:
 980                /* allow unprivileged users to decrease the value */
 981                if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
 982                        ret = -EPERM;
 983                else {
 984                        if (val < 0)
 985                                ret = -EINVAL;
 986                        else
 987                                sk->sk_ll_usec = val;
 988                }
 989                break;
 990#endif
 991
 992        case SO_MAX_PACING_RATE:
 993                sk->sk_max_pacing_rate = val;
 994                sk->sk_pacing_rate = min(sk->sk_pacing_rate,
 995                                         sk->sk_max_pacing_rate);
 996                break;
 997
 998        case SO_INCOMING_CPU:
 999                sk->sk_incoming_cpu = val;
1000                break;
1001
1002        case SO_CNX_ADVICE:
1003                if (val == 1)
1004                        dst_negative_advice(sk);
1005                break;
1006        default:
1007                ret = -ENOPROTOOPT;
1008                break;
1009        }
1010        release_sock(sk);
1011        return ret;
1012}
1013EXPORT_SYMBOL(sock_setsockopt);
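
     /*
      * Illustrative sketch (user-space view of the SO_RCVBUF handling above;
      * "fd" is an already created socket and <sys/socket.h>/<assert.h> are
      * assumed): the value passed in is doubled to cover sk_buff overhead,
      * and that doubled value is what getsockopt() later reports, provided
      * the request fits under net.core.rmem_max:
      *
      *        int val = 65536, out = 0;
      *        socklen_t len = sizeof(out);
      *
      *        setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
      *        getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
      *        assert(out == 2 * val);
      */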
1014
1015
1016static void cred_to_ucred(struct pid *pid, const struct cred *cred,
1017                          struct ucred *ucred)
1018{
1019        ucred->pid = pid_vnr(pid);
1020        ucred->uid = ucred->gid = -1;
1021        if (cred) {
1022                struct user_namespace *current_ns = current_user_ns();
1023
1024                ucred->uid = from_kuid_munged(current_ns, cred->euid);
1025                ucred->gid = from_kgid_munged(current_ns, cred->egid);
1026        }
1027}
1028
1029int sock_getsockopt(struct socket *sock, int level, int optname,
1030                    char __user *optval, int __user *optlen)
1031{
1032        struct sock *sk = sock->sk;
1033
1034        union {
1035                int val;
1036                struct linger ling;
1037                struct timeval tm;
1038        } v;
1039
1040        int lv = sizeof(int);
1041        int len;
1042
1043        if (get_user(len, optlen))
1044                return -EFAULT;
1045        if (len < 0)
1046                return -EINVAL;
1047
1048        memset(&v, 0, sizeof(v));
1049
1050        switch (optname) {
1051        case SO_DEBUG:
1052                v.val = sock_flag(sk, SOCK_DBG);
1053                break;
1054
1055        case SO_DONTROUTE:
1056                v.val = sock_flag(sk, SOCK_LOCALROUTE);
1057                break;
1058
1059        case SO_BROADCAST:
1060                v.val = sock_flag(sk, SOCK_BROADCAST);
1061                break;
1062
1063        case SO_SNDBUF:
1064                v.val = sk->sk_sndbuf;
1065                break;
1066
1067        case SO_RCVBUF:
1068                v.val = sk->sk_rcvbuf;
1069                break;
1070
1071        case SO_REUSEADDR:
1072                v.val = sk->sk_reuse;
1073                break;
1074
1075        case SO_REUSEPORT:
1076                v.val = sk->sk_reuseport;
1077                break;
1078
1079        case SO_KEEPALIVE:
1080                v.val = sock_flag(sk, SOCK_KEEPOPEN);
1081                break;
1082
1083        case SO_TYPE:
1084                v.val = sk->sk_type;
1085                break;
1086
1087        case SO_PROTOCOL:
1088                v.val = sk->sk_protocol;
1089                break;
1090
1091        case SO_DOMAIN:
1092                v.val = sk->sk_family;
1093                break;
1094
1095        case SO_ERROR:
1096                v.val = -sock_error(sk);
1097                if (v.val == 0)
1098                        v.val = xchg(&sk->sk_err_soft, 0);
1099                break;
1100
1101        case SO_OOBINLINE:
1102                v.val = sock_flag(sk, SOCK_URGINLINE);
1103                break;
1104
1105        case SO_NO_CHECK:
1106                v.val = sk->sk_no_check_tx;
1107                break;
1108
1109        case SO_PRIORITY:
1110                v.val = sk->sk_priority;
1111                break;
1112
1113        case SO_LINGER:
1114                lv              = sizeof(v.ling);
1115                v.ling.l_onoff  = sock_flag(sk, SOCK_LINGER);
1116                v.ling.l_linger = sk->sk_lingertime / HZ;
1117                break;
1118
1119        case SO_BSDCOMPAT:
1120                sock_warn_obsolete_bsdism("getsockopt");
1121                break;
1122
1123        case SO_TIMESTAMP:
1124                v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
1125                                !sock_flag(sk, SOCK_RCVTSTAMPNS);
1126                break;
1127
1128        case SO_TIMESTAMPNS:
1129                v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1130                break;
1131
1132        case SO_TIMESTAMPING:
1133                v.val = sk->sk_tsflags;
1134                break;
1135
1136        case SO_RCVTIMEO:
1137                lv = sizeof(struct timeval);
1138                if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
1139                        v.tm.tv_sec = 0;
1140                        v.tm.tv_usec = 0;
1141                } else {
1142                        v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
1143                        v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
1144                }
1145                break;
1146
1147        case SO_SNDTIMEO:
1148                lv = sizeof(struct timeval);
1149                if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
1150                        v.tm.tv_sec = 0;
1151                        v.tm.tv_usec = 0;
1152                } else {
1153                        v.tm.tv_sec = sk->sk_sndtimeo / HZ;
1154                        v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
1155                }
1156                break;
1157
1158        case SO_RCVLOWAT:
1159                v.val = sk->sk_rcvlowat;
1160                break;
1161
1162        case SO_SNDLOWAT:
1163                v.val = 1;
1164                break;
1165
1166        case SO_PASSCRED:
1167                v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1168                break;
1169
1170        case SO_PEERCRED:
1171        {
1172                struct ucred peercred;
1173                if (len > sizeof(peercred))
1174                        len = sizeof(peercred);
1175                cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
1176                if (copy_to_user(optval, &peercred, len))
1177                        return -EFAULT;
1178                goto lenout;
1179        }
1180
1181        case SO_PEERNAME:
1182        {
1183                char address[128];
1184
1185                if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
1186                        return -ENOTCONN;
1187                if (lv < len)
1188                        return -EINVAL;
1189                if (copy_to_user(optval, address, len))
1190                        return -EFAULT;
1191                goto lenout;
1192        }
1193
1194        /* Dubious BSD thing... Probably nobody even uses it, but
1195         * the UNIX standard wants it for whatever reason... -DaveM
1196         */
1197        case SO_ACCEPTCONN:
1198                v.val = sk->sk_state == TCP_LISTEN;
1199                break;
1200
1201        case SO_PASSSEC:
1202                v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1203                break;
1204
1205        case SO_PEERSEC:
1206                return security_socket_getpeersec_stream(sock, optval, optlen, len);
1207
1208        case SO_MARK:
1209                v.val = sk->sk_mark;
1210                break;
1211
1212        case SO_RXQ_OVFL:
1213                v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1214                break;
1215
1216        case SO_WIFI_STATUS:
1217                v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1218                break;
1219
1220        case SO_PEEK_OFF:
1221                if (!sock->ops->set_peek_off)
1222                        return -EOPNOTSUPP;
1223
1224                v.val = sk->sk_peek_off;
1225                break;
1226        case SO_NOFCS:
1227                v.val = sock_flag(sk, SOCK_NOFCS);
1228                break;
1229
1230        case SO_BINDTODEVICE:
1231                return sock_getbindtodevice(sk, optval, optlen, len);
1232
1233        case SO_GET_FILTER:
1234                len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
1235                if (len < 0)
1236                        return len;
1237
1238                goto lenout;
1239
1240        case SO_LOCK_FILTER:
1241                v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1242                break;
1243
1244        case SO_BPF_EXTENSIONS:
1245                v.val = bpf_tell_extensions();
1246                break;
1247
1248        case SO_SELECT_ERR_QUEUE:
1249                v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1250                break;
1251
1252#ifdef CONFIG_NET_RX_BUSY_POLL
1253        case SO_BUSY_POLL:
1254                v.val = sk->sk_ll_usec;
1255                break;
1256#endif
1257
1258        case SO_MAX_PACING_RATE:
1259                v.val = sk->sk_max_pacing_rate;
1260                break;
1261
1262        case SO_INCOMING_CPU:
1263                v.val = sk->sk_incoming_cpu;
1264                break;
1265
1266        default:
 1267                /* We implement SO_SNDLOWAT etc. to not be settable
1268                 * (1003.1g 7).
1269                 */
1270                return -ENOPROTOOPT;
1271        }
1272
1273        if (len > lv)
1274                len = lv;
1275        if (copy_to_user(optval, &v, len))
1276                return -EFAULT;
1277lenout:
1278        if (put_user(len, optlen))
1279                return -EFAULT;
1280        return 0;
1281}
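
     /*
      * Illustrative sketch (user-space usage; glibc needs _GNU_SOURCE for
      * struct ucred): SO_PEERCRED above reports the credentials of the peer
      * of a connected AF_UNIX socket "fd":
      *
      *        struct ucred peer;
      *        socklen_t len = sizeof(peer);
      *
      *        if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
      *                printf("pid=%d uid=%u gid=%u\n",
      *                       peer.pid, peer.uid, peer.gid);
      */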
1282
1283/*
1284 * Initialize an sk_lock.
1285 *
1286 * (We also register the sk_lock with the lock validator.)
1287 */
1288static inline void sock_lock_init(struct sock *sk)
1289{
1290        sock_lock_init_class_and_name(sk,
1291                        af_family_slock_key_strings[sk->sk_family],
1292                        af_family_slock_keys + sk->sk_family,
1293                        af_family_key_strings[sk->sk_family],
1294                        af_family_keys + sk->sk_family);
1295}
1296
1297/*
 1298 * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
 1299 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 1300 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
1301 */
1302static void sock_copy(struct sock *nsk, const struct sock *osk)
1303{
1304#ifdef CONFIG_SECURITY_NETWORK
1305        void *sptr = nsk->sk_security;
1306#endif
1307        memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
1308
1309        memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
1310               osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
1311
1312#ifdef CONFIG_SECURITY_NETWORK
1313        nsk->sk_security = sptr;
1314        security_sk_clone(osk, nsk);
1315#endif
1316}
1317
1318void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1319{
1320        unsigned long nulls1, nulls2;
1321
1322        nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
1323        nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
1324        if (nulls1 > nulls2)
1325                swap(nulls1, nulls2);
1326
1327        if (nulls1 != 0)
1328                memset((char *)sk, 0, nulls1);
1329        memset((char *)sk + nulls1 + sizeof(void *), 0,
1330               nulls2 - nulls1 - sizeof(void *));
1331        memset((char *)sk + nulls2 + sizeof(void *), 0,
1332               size - nulls2 - sizeof(void *));
1333}
1334EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);
1335
1336static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
1337                int family)
1338{
1339        struct sock *sk;
1340        struct kmem_cache *slab;
1341
1342        slab = prot->slab;
1343        if (slab != NULL) {
1344                sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
1345                if (!sk)
1346                        return sk;
1347                if (priority & __GFP_ZERO) {
1348                        if (prot->clear_sk)
1349                                prot->clear_sk(sk, prot->obj_size);
1350                        else
1351                                sk_prot_clear_nulls(sk, prot->obj_size);
1352                }
1353        } else
1354                sk = kmalloc(prot->obj_size, priority);
1355
1356        if (sk != NULL) {
1357                kmemcheck_annotate_bitfield(sk, flags);
1358
1359                if (security_sk_alloc(sk, family, priority))
1360                        goto out_free;
1361
1362                if (!try_module_get(prot->owner))
1363                        goto out_free_sec;
1364                sk_tx_queue_clear(sk);
1365                cgroup_sk_alloc(&sk->sk_cgrp_data);
1366        }
1367
1368        return sk;
1369
1370out_free_sec:
1371        security_sk_free(sk);
1372out_free:
1373        if (slab != NULL)
1374                kmem_cache_free(slab, sk);
1375        else
1376                kfree(sk);
1377        return NULL;
1378}
1379
1380static void sk_prot_free(struct proto *prot, struct sock *sk)
1381{
1382        struct kmem_cache *slab;
1383        struct module *owner;
1384
1385        owner = prot->owner;
1386        slab = prot->slab;
1387
1388        cgroup_sk_free(&sk->sk_cgrp_data);
1389        security_sk_free(sk);
1390        if (slab != NULL)
1391                kmem_cache_free(slab, sk);
1392        else
1393                kfree(sk);
1394        module_put(owner);
1395}
1396
1397/**
1398 *      sk_alloc - All socket objects are allocated here
1399 *      @net: the applicable net namespace
1400 *      @family: protocol family
1401 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1402 *      @prot: struct proto associated with this new sock instance
1403 *      @kern: is this to be a kernel socket?
1404 */
1405struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1406                      struct proto *prot, int kern)
1407{
1408        struct sock *sk;
1409
1410        sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
1411        if (sk) {
1412                sk->sk_family = family;
1413                /*
1414                 * See comment in struct sock definition to understand
1415                 * why we need sk_prot_creator -acme
1416                 */
1417                sk->sk_prot = sk->sk_prot_creator = prot;
1418                sock_lock_init(sk);
1419                sk->sk_net_refcnt = kern ? 0 : 1;
1420                if (likely(sk->sk_net_refcnt))
1421                        get_net(net);
1422                sock_net_set(sk, net);
1423                atomic_set(&sk->sk_wmem_alloc, 1);
1424
1425                sock_update_classid(&sk->sk_cgrp_data);
1426                sock_update_netprioidx(&sk->sk_cgrp_data);
1427        }
1428
1429        return sk;
1430}
1431EXPORT_SYMBOL(sk_alloc);
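
     /*
      * Illustrative sketch (hypothetical address family): a protocol's
      * ->create() handler allocates its sock here and then fills in the
      * generic fields with sock_init_data(); PF_MYPROTO and myproto_prot are
      * stand-ins for the family's own constants:
      *
      *        static int myproto_create(struct net *net, struct socket *sock,
      *                                  int protocol, int kern)
      *        {
      *                struct sock *sk;
      *
      *                sk = sk_alloc(net, PF_MYPROTO, GFP_KERNEL,
      *                              &myproto_prot, kern);
      *                if (!sk)
      *                        return -ENOMEM;
      *                sock_init_data(sock, sk);
      *                return 0;
      *        }
      */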
1432
1433/* Sockets having SOCK_RCU_FREE will call this function after one RCU
1434 * grace period. This is the case for UDP sockets and TCP listeners.
1435 */
1436static void __sk_destruct(struct rcu_head *head)
1437{
1438        struct sock *sk = container_of(head, struct sock, sk_rcu);
1439        struct sk_filter *filter;
1440
1441        if (sk->sk_destruct)
1442                sk->sk_destruct(sk);
1443
1444        filter = rcu_dereference_check(sk->sk_filter,
1445                                       atomic_read(&sk->sk_wmem_alloc) == 0);
1446        if (filter) {
1447                sk_filter_uncharge(sk, filter);
1448                RCU_INIT_POINTER(sk->sk_filter, NULL);
1449        }
1450        if (rcu_access_pointer(sk->sk_reuseport_cb))
1451                reuseport_detach_sock(sk);
1452
1453        sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
1454
1455        if (atomic_read(&sk->sk_omem_alloc))
1456                pr_debug("%s: optmem leakage (%d bytes) detected\n",
1457                         __func__, atomic_read(&sk->sk_omem_alloc));
1458
1459        if (sk->sk_peer_cred)
1460                put_cred(sk->sk_peer_cred);
1461        put_pid(sk->sk_peer_pid);
1462        if (likely(sk->sk_net_refcnt))
1463                put_net(sock_net(sk));
1464        sk_prot_free(sk->sk_prot_creator, sk);
1465}
1466
1467void sk_destruct(struct sock *sk)
1468{
1469        if (sock_flag(sk, SOCK_RCU_FREE))
1470                call_rcu(&sk->sk_rcu, __sk_destruct);
1471        else
1472                __sk_destruct(&sk->sk_rcu);
1473}
1474
1475static void __sk_free(struct sock *sk)
1476{
1477        if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
1478                sock_diag_broadcast_destroy(sk);
1479        else
1480                sk_destruct(sk);
1481}
1482
1483void sk_free(struct sock *sk)
1484{
1485        /*
 1486         * We subtract one from sk_wmem_alloc; if the result is non-zero,
 1487         * some packets are still in a tx queue and sock_wfree() will
 1488         * call __sk_free(sk) later, once they have been freed.
1489         */
1490        if (atomic_dec_and_test(&sk->sk_wmem_alloc))
1491                __sk_free(sk);
1492}
1493EXPORT_SYMBOL(sk_free);
1494
1495/**
1496 *      sk_clone_lock - clone a socket, and lock its clone
1497 *      @sk: the socket to clone
1498 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1499 *
1500 *      Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1501 */
1502struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1503{
1504        struct sock *newsk;
1505        bool is_charged = true;
1506
1507        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1508        if (newsk != NULL) {
1509                struct sk_filter *filter;
1510
1511                sock_copy(newsk, sk);
1512
1513                /* SANITY */
1514                if (likely(newsk->sk_net_refcnt))
1515                        get_net(sock_net(newsk));
1516                sk_node_init(&newsk->sk_node);
1517                sock_lock_init(newsk);
1518                bh_lock_sock(newsk);
1519                newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
1520                newsk->sk_backlog.len = 0;
1521
1522                atomic_set(&newsk->sk_rmem_alloc, 0);
1523                /*
1524                 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
1525                 */
1526                atomic_set(&newsk->sk_wmem_alloc, 1);
1527                atomic_set(&newsk->sk_omem_alloc, 0);
1528                skb_queue_head_init(&newsk->sk_receive_queue);
1529                skb_queue_head_init(&newsk->sk_write_queue);
1530
1531                rwlock_init(&newsk->sk_callback_lock);
1532                lockdep_set_class_and_name(&newsk->sk_callback_lock,
1533                                af_callback_keys + newsk->sk_family,
1534                                af_family_clock_key_strings[newsk->sk_family]);
1535
1536                newsk->sk_dst_cache     = NULL;
1537                newsk->sk_wmem_queued   = 0;
1538                newsk->sk_forward_alloc = 0;
1539                atomic_set(&newsk->sk_drops, 0);
1540                newsk->sk_send_head     = NULL;
1541                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
1542
1543                sock_reset_flag(newsk, SOCK_DONE);
1544                skb_queue_head_init(&newsk->sk_error_queue);
1545
1546                filter = rcu_dereference_protected(newsk->sk_filter, 1);
1547                if (filter != NULL)
1548                        /* though it's an empty new sock, the charging may fail
1549                         * if sysctl_optmem_max was changed between the creation of
1550                         * the original socket and this clone
1551                         */
1552                        is_charged = sk_filter_charge(newsk, filter);
1553
1554                if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1555                        /* It is still a raw copy of the parent, so invalidate
1556                         * the destructor and do a plain sk_free() */
1557                        newsk->sk_destruct = NULL;
1558                        bh_unlock_sock(newsk);
1559                        sk_free(newsk);
1560                        newsk = NULL;
1561                        goto out;
1562                }
1563                RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1564
1565                newsk->sk_err      = 0;
1566                newsk->sk_priority = 0;
1567                newsk->sk_incoming_cpu = raw_smp_processor_id();
1568                atomic64_set(&newsk->sk_cookie, 0);
1569                /*
1570                 * Before updating sk_refcnt, we must commit prior changes to memory
1571                 * (Documentation/RCU/rculist_nulls.txt for details)
1572                 */
1573                smp_wmb();
1574                atomic_set(&newsk->sk_refcnt, 2);
1575
1576                /*
1577                 * Increment the counter in the same struct proto as the master
1578                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1579                 * is the same as sk->sk_prot->socks, as this field was copied
1580                 * with memcpy).
1581                 *
1582                 * This _changes_ the previous behaviour, where
1583                 * tcp_create_openreq_child always was incrementing the
1584                 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
1585                 * to be taken into account in all callers. -acme
1586                 */
1587                sk_refcnt_debug_inc(newsk);
1588                sk_set_socket(newsk, NULL);
1589                newsk->sk_wq = NULL;
1590
1591                if (mem_cgroup_sockets_enabled && sk->sk_memcg)
1592                        sock_update_memcg(newsk);
1593
1594                if (newsk->sk_prot->sockets_allocated)
1595                        sk_sockets_allocated_inc(newsk);
1596
1597                if (sock_needs_netstamp(sk) &&
1598                    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1599                        net_enable_timestamp();
1600        }
1601out:
1602        return newsk;
1603}
1604EXPORT_SYMBOL_GPL(sk_clone_lock);
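
/* Editorial sketch, not part of the original file: a typical caller of
 * sk_clone_lock() in a protocol's accept path.  As the kerneldoc above
 * notes, the clone comes back locked and the caller must bh_unlock_sock()
 * it on every path; example_accept_clone() is a hypothetical name.
 */
static struct sock *example_accept_clone(const struct sock *parent)
{
	struct sock *newsk = sk_clone_lock(parent, GFP_ATOMIC);

	if (!newsk)
		return NULL;		/* allocation or filter charge failed */

	/* ... protocol specific setup of newsk would go here ... */

	bh_unlock_sock(newsk);		/* always unlock the clone */
	return newsk;
}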
1605
1606void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1607{
1608        u32 max_segs = 1;
1609
1610        sk_dst_set(sk, dst);
1611        sk->sk_route_caps = dst->dev->features;
1612        if (sk->sk_route_caps & NETIF_F_GSO)
1613                sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
1614        sk->sk_route_caps &= ~sk->sk_route_nocaps;
1615        if (sk_can_gso(sk)) {
1616                if (dst->header_len) {
1617                        sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1618                } else {
1619                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1620                        sk->sk_gso_max_size = dst->dev->gso_max_size;
1621                        max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1622                }
1623        }
1624        sk->sk_gso_max_segs = max_segs;
1625}
1626EXPORT_SYMBOL_GPL(sk_setup_caps);
1627
1628/*
1629 *      Simple resource managers for sockets.
1630 */
1631
1632
1633/*
1634 * Write buffer destructor automatically called from kfree_skb.
1635 */
1636void sock_wfree(struct sk_buff *skb)
1637{
1638        struct sock *sk = skb->sk;
1639        unsigned int len = skb->truesize;
1640
1641        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
1642                /*
1643                 * Keep a reference on sk_wmem_alloc, this will be released
1644                 * after sk_write_space() call
1645                 */
1646                atomic_sub(len - 1, &sk->sk_wmem_alloc);
1647                sk->sk_write_space(sk);
1648                len = 1;
1649        }
1650        /*
1651         * if sk_wmem_alloc reaches 0, we must finish what sk_free()
1652         * could not do because of in-flight packets
1653         */
1654        if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
1655                __sk_free(sk);
1656}
1657EXPORT_SYMBOL(sock_wfree);
1658
1659/* This variant of sock_wfree() is used by TCP,
1660 * since it sets SOCK_USE_WRITE_QUEUE.
1661 */
1662void __sock_wfree(struct sk_buff *skb)
1663{
1664        struct sock *sk = skb->sk;
1665
1666        if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
1667                __sk_free(sk);
1668}
1669
1670void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1671{
1672        skb_orphan(skb);
1673        skb->sk = sk;
1674#ifdef CONFIG_INET
1675        if (unlikely(!sk_fullsock(sk))) {
1676                skb->destructor = sock_edemux;
1677                sock_hold(sk);
1678                return;
1679        }
1680#endif
1681        skb->destructor = sock_wfree;
1682        skb_set_hash_from_sk(skb, sk);
1683        /*
1684         * We used to take a refcount on sk, but the following operation
1685         * is enough to guarantee sk_free() won't free this sock until
1686         * all in-flight packets have completed
1687         */
1688        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1689}
1690EXPORT_SYMBOL(skb_set_owner_w);
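
/* Editorial sketch, not part of the original file: skb_set_owner_w()
 * charges skb->truesize to sk_wmem_alloc and installs sock_wfree() as the
 * destructor, so the charge is dropped (and the writer possibly woken)
 * when the skb is eventually freed; example_queue_for_tx() is hypothetical.
 */
static void example_queue_for_tx(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);	/* account skb against the send buffer */
	/* ... hand the skb to the device layer; when it is freed,
	 * kfree_skb() runs sock_wfree() and releases the accounting ...
	 */
	kfree_skb(skb);			/* placeholder for the real transmit */
}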
1691
1692/* This helper is used by netem, as it can hold packets in its
1693 * delay queue. We want to allow the owner socket to send more
1694 * packets, as if they were already TX completed by a typical driver.
1695 * But we also want to keep skb->sk set because some packet schedulers
1696 * rely on it (sch_fq for example). So we set skb->truesize to a small
1697 * amount (1) and decrease sk_wmem_alloc accordingly.
1698 */
1699void skb_orphan_partial(struct sk_buff *skb)
1700{
1701        /* If this skb is a TCP pure ACK or has already been through here,
1702         * we have nothing to do. 2 is already a very small truesize.
1703         */
1704        if (skb->truesize <= 2)
1705                return;
1706
1707        /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1708         * so we do not completely orphan the skb, but transfer all
1709         * accounted bytes but one, to avoid unexpected reorders.
1710         */
1711        if (skb->destructor == sock_wfree
1712#ifdef CONFIG_INET
1713            || skb->destructor == tcp_wfree
1714#endif
1715                ) {
1716                atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
1717                skb->truesize = 1;
1718        } else {
1719                skb_orphan(skb);
1720        }
1721}
1722EXPORT_SYMBOL(skb_orphan_partial);
1723
1724/*
1725 * Read buffer destructor automatically called from kfree_skb.
1726 */
1727void sock_rfree(struct sk_buff *skb)
1728{
1729        struct sock *sk = skb->sk;
1730        unsigned int len = skb->truesize;
1731
1732        atomic_sub(len, &sk->sk_rmem_alloc);
1733        sk_mem_uncharge(sk, len);
1734}
1735EXPORT_SYMBOL(sock_rfree);
1736
1737/*
1738 * Buffer destructor for skbs that are not used directly in read or write
1739 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
1740 */
1741void sock_efree(struct sk_buff *skb)
1742{
1743        sock_put(skb->sk);
1744}
1745EXPORT_SYMBOL(sock_efree);
1746
1747kuid_t sock_i_uid(struct sock *sk)
1748{
1749        kuid_t uid;
1750
1751        read_lock_bh(&sk->sk_callback_lock);
1752        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
1753        read_unlock_bh(&sk->sk_callback_lock);
1754        return uid;
1755}
1756EXPORT_SYMBOL(sock_i_uid);
1757
1758unsigned long sock_i_ino(struct sock *sk)
1759{
1760        unsigned long ino;
1761
1762        read_lock_bh(&sk->sk_callback_lock);
1763        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
1764        read_unlock_bh(&sk->sk_callback_lock);
1765        return ino;
1766}
1767EXPORT_SYMBOL(sock_i_ino);
1768
1769/*
1770 * Allocate a skb from the socket's send buffer.
1771 */
1772struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1773                             gfp_t priority)
1774{
1775        if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1776                struct sk_buff *skb = alloc_skb(size, priority);
1777                if (skb) {
1778                        skb_set_owner_w(skb, sk);
1779                        return skb;
1780                }
1781        }
1782        return NULL;
1783}
1784EXPORT_SYMBOL(sock_wmalloc);
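
/* Editorial sketch, not part of the original file: sock_wmalloc() refuses
 * the allocation when sk_wmem_alloc already exceeds sk_sndbuf unless
 * "force" is set, and the returned skb is pre-charged via skb_set_owner_w().
 */
static struct sk_buff *example_wmalloc(struct sock *sk, unsigned long len)
{
	return sock_wmalloc(sk, len, 0 /* don't force past sk_sndbuf */,
			    GFP_KERNEL);
}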
1785
1786/*
1787 * Allocate a memory block from the socket's option memory buffer.
1788 */
1789void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1790{
1791        if ((unsigned int)size <= sysctl_optmem_max &&
1792            atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1793                void *mem;
1794                /* First do the add, to avoid the race if kmalloc
1795                 * might sleep.
1796                 */
1797                atomic_add(size, &sk->sk_omem_alloc);
1798                mem = kmalloc(size, priority);
1799                if (mem)
1800                        return mem;
1801                atomic_sub(size, &sk->sk_omem_alloc);
1802        }
1803        return NULL;
1804}
1805EXPORT_SYMBOL(sock_kmalloc);
1806
1807/* Free an option memory block. Note, we actually want the inline
1808 * here as this allows gcc to detect the nullify and fold away the
1809 * condition entirely.
1810 */
1811static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
1812                                  const bool nullify)
1813{
1814        if (WARN_ON_ONCE(!mem))
1815                return;
1816        if (nullify)
1817                kzfree(mem);
1818        else
1819                kfree(mem);
1820        atomic_sub(size, &sk->sk_omem_alloc);
1821}
1822
1823void sock_kfree_s(struct sock *sk, void *mem, int size)
1824{
1825        __sock_kfree_s(sk, mem, size, false);
1826}
1827EXPORT_SYMBOL(sock_kfree_s);
1828
1829void sock_kzfree_s(struct sock *sk, void *mem, int size)
1830{
1831        __sock_kfree_s(sk, mem, size, true);
1832}
1833EXPORT_SYMBOL(sock_kzfree_s);
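
/* Editorial sketch, not part of the original file: option memory is charged
 * to sk_omem_alloc, so every sock_kmalloc() needs a matching sock_kfree_s()
 * (or sock_kzfree_s() for sensitive data) with the same size.
 */
static int example_handle_option(struct sock *sk, int len)
{
	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!buf)
		return -ENOBUFS;	/* over sysctl_optmem_max or OOM */

	/* ... copy in and validate the option data ... */

	sock_kfree_s(sk, buf, len);	/* uncharges sk_omem_alloc */
	return 0;
}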
1834
1835/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
1836   I think these locks should be removed for datagram sockets.
1837 */
1838static long sock_wait_for_wmem(struct sock *sk, long timeo)
1839{
1840        DEFINE_WAIT(wait);
1841
1842        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1843        for (;;) {
1844                if (!timeo)
1845                        break;
1846                if (signal_pending(current))
1847                        break;
1848                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1849                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1850                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1851                        break;
1852                if (sk->sk_shutdown & SEND_SHUTDOWN)
1853                        break;
1854                if (sk->sk_err)
1855                        break;
1856                timeo = schedule_timeout(timeo);
1857        }
1858        finish_wait(sk_sleep(sk), &wait);
1859        return timeo;
1860}
1861
1862
1863/*
1864 *      Generic send/receive buffer handlers
1865 */
1866
1867struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1868                                     unsigned long data_len, int noblock,
1869                                     int *errcode, int max_page_order)
1870{
1871        struct sk_buff *skb;
1872        long timeo;
1873        int err;
1874
1875        timeo = sock_sndtimeo(sk, noblock);
1876        for (;;) {
1877                err = sock_error(sk);
1878                if (err != 0)
1879                        goto failure;
1880
1881                err = -EPIPE;
1882                if (sk->sk_shutdown & SEND_SHUTDOWN)
1883                        goto failure;
1884
1885                if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
1886                        break;
1887
1888                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1889                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1890                err = -EAGAIN;
1891                if (!timeo)
1892                        goto failure;
1893                if (signal_pending(current))
1894                        goto interrupted;
1895                timeo = sock_wait_for_wmem(sk, timeo);
1896        }
1897        skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
1898                                   errcode, sk->sk_allocation);
1899        if (skb)
1900                skb_set_owner_w(skb, sk);
1901        return skb;
1902
1903interrupted:
1904        err = sock_intr_errno(timeo);
1905failure:
1906        *errcode = err;
1907        return NULL;
1908}
1909EXPORT_SYMBOL(sock_alloc_send_pskb);
1910
1911struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1912                                    int noblock, int *errcode)
1913{
1914        return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
1915}
1916EXPORT_SYMBOL(sock_alloc_send_skb);
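
/* Editorial sketch, not part of the original file: a datagram sendmsg()
 * implementation usually blocks on sndbuf through sock_alloc_send_skb()
 * and propagates the error it reports; example_dgram_send() is hypothetical.
 */
static int example_dgram_send(struct sock *sk, size_t len, int noblock)
{
	int err;
	struct sk_buff *skb = sock_alloc_send_skb(sk, len, noblock, &err);

	if (!skb)
		return err;	/* -EAGAIN, -EPIPE, pending sk_err or signal */

	/* ... fill the skb and pass it to the output path ... */
	kfree_skb(skb);		/* placeholder for the real transmit */
	return 0;
}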
1917
1918int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1919                     struct sockcm_cookie *sockc)
1920{
1921        u32 tsflags;
1922
1923        switch (cmsg->cmsg_type) {
1924        case SO_MARK:
1925                if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1926                        return -EPERM;
1927                if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1928                        return -EINVAL;
1929                sockc->mark = *(u32 *)CMSG_DATA(cmsg);
1930                break;
1931        case SO_TIMESTAMPING:
1932                if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
1933                        return -EINVAL;
1934
1935                tsflags = *(u32 *)CMSG_DATA(cmsg);
1936                if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
1937                        return -EINVAL;
1938
1939                sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
1940                sockc->tsflags |= tsflags;
1941                break;
1942        /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
1943        case SCM_RIGHTS:
1944        case SCM_CREDENTIALS:
1945                break;
1946        default:
1947                return -EINVAL;
1948        }
1949        return 0;
1950}
1951EXPORT_SYMBOL(__sock_cmsg_send);
1952
1953int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1954                   struct sockcm_cookie *sockc)
1955{
1956        struct cmsghdr *cmsg;
1957        int ret;
1958
1959        for_each_cmsghdr(cmsg, msg) {
1960                if (!CMSG_OK(msg, cmsg))
1961                        return -EINVAL;
1962                if (cmsg->cmsg_level != SOL_SOCKET)
1963                        continue;
1964                ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
1965                if (ret)
1966                        return ret;
1967        }
1968        return 0;
1969}
1970EXPORT_SYMBOL(sock_cmsg_send);
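
/* Editorial sketch, not part of the original file: sendmsg() paths seed a
 * sockcm_cookie from the socket defaults and then let SOL_SOCKET control
 * messages (SO_MARK, SO_TIMESTAMPING) override it via sock_cmsg_send().
 * Using sk->sk_tsflags as the default is an assumption in this sketch.
 */
static int example_parse_cmsgs(struct sock *sk, struct msghdr *msg,
			       struct sockcm_cookie *sockc)
{
	sockc->tsflags = sk->sk_tsflags;	/* assumed per-socket default */

	if (msg->msg_controllen)
		return sock_cmsg_send(sk, msg, sockc);
	return 0;
}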
1971
1972/* On 32bit arches, an skb frag is limited to 2^15 */
1973#define SKB_FRAG_PAGE_ORDER     get_order(32768)
1974
1975/**
1976 * skb_page_frag_refill - check that a page_frag contains enough room
1977 * @sz: minimum size of the fragment we want to get
1978 * @pfrag: pointer to page_frag
1979 * @gfp: priority for memory allocation
1980 *
1981 * Note: While this allocator tries to use high order pages, there is
1982 * no guarantee that allocations succeed. Therefore, @sz MUST be
1983 * less than or equal to PAGE_SIZE.
1984 */
1985bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
1986{
1987        if (pfrag->page) {
1988                if (page_ref_count(pfrag->page) == 1) {
1989                        pfrag->offset = 0;
1990                        return true;
1991                }
1992                if (pfrag->offset + sz <= pfrag->size)
1993                        return true;
1994                put_page(pfrag->page);
1995        }
1996
1997        pfrag->offset = 0;
1998        if (SKB_FRAG_PAGE_ORDER) {
1999                /* Avoid direct reclaim but allow kswapd to wake */
2000                pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2001                                          __GFP_COMP | __GFP_NOWARN |
2002                                          __GFP_NORETRY,
2003                                          SKB_FRAG_PAGE_ORDER);
2004                if (likely(pfrag->page)) {
2005                        pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2006                        return true;
2007                }
2008        }
2009        pfrag->page = alloc_page(gfp);
2010        if (likely(pfrag->page)) {
2011                pfrag->size = PAGE_SIZE;
2012                return true;
2013        }
2014        return false;
2015}
2016EXPORT_SYMBOL(skb_page_frag_refill);
2017
2018bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2019{
2020        if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2021                return true;
2022
2023        sk_enter_memory_pressure(sk);
2024        sk_stream_moderate_sndbuf(sk);
2025        return false;
2026}
2027EXPORT_SYMBOL(sk_page_frag_refill);
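
/* Editorial sketch, not part of the original file: stream protocols refill
 * the socket's page fragment before copying user data into it.  The
 * sk_page_frag() helper is assumed to be the usual accessor from net/sock.h.
 */
static int example_reserve_frag(struct sock *sk, struct page_frag **pfrag)
{
	*pfrag = sk_page_frag(sk);
	if (!sk_page_frag_refill(sk, *pfrag))
		return -ENOBUFS;	/* under memory pressure, caller waits */
	return 0;
}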
2028
2029static void __lock_sock(struct sock *sk)
2030        __releases(&sk->sk_lock.slock)
2031        __acquires(&sk->sk_lock.slock)
2032{
2033        DEFINE_WAIT(wait);
2034
2035        for (;;) {
2036                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2037                                        TASK_UNINTERRUPTIBLE);
2038                spin_unlock_bh(&sk->sk_lock.slock);
2039                schedule();
2040                spin_lock_bh(&sk->sk_lock.slock);
2041                if (!sock_owned_by_user(sk))
2042                        break;
2043        }
2044        finish_wait(&sk->sk_lock.wq, &wait);
2045}
2046
2047static void __release_sock(struct sock *sk)
2048        __releases(&sk->sk_lock.slock)
2049        __acquires(&sk->sk_lock.slock)
2050{
2051        struct sk_buff *skb, *next;
2052
2053        while ((skb = sk->sk_backlog.head) != NULL) {
2054                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2055
2056                spin_unlock_bh(&sk->sk_lock.slock);
2057
2058                do {
2059                        next = skb->next;
2060                        prefetch(next);
2061                        WARN_ON_ONCE(skb_dst_is_noref(skb));
2062                        skb->next = NULL;
2063                        sk_backlog_rcv(sk, skb);
2064
2065                        cond_resched();
2066
2067                        skb = next;
2068                } while (skb != NULL);
2069
2070                spin_lock_bh(&sk->sk_lock.slock);
2071        }
2072
2073        /*
2074         * Doing the zeroing here guarantees we cannot loop forever
2075         * while a wild producer attempts to flood us.
2076         */
2077        sk->sk_backlog.len = 0;
2078}
2079
2080void __sk_flush_backlog(struct sock *sk)
2081{
2082        spin_lock_bh(&sk->sk_lock.slock);
2083        __release_sock(sk);
2084        spin_unlock_bh(&sk->sk_lock.slock);
2085}
2086
2087/**
2088 * sk_wait_data - wait for data to arrive at sk_receive_queue
2089 * @sk:    sock to wait on
2090 * @timeo: for how long
2091 * @skb:   last skb seen on sk_receive_queue
2092 *
2093 * Socket state, including sk->sk_err, is changed only under the socket lock,
2094 * hence we may omit checks after joining the wait queue.
2095 * We check the receive queue before schedule() only as an optimization;
2096 * it is very likely that release_sock() added new data.
2097 */
2098int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2099{
2100        int rc;
2101        DEFINE_WAIT(wait);
2102
2103        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2104        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2105        rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
2106        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2107        finish_wait(sk_sleep(sk), &wait);
2108        return rc;
2109}
2110EXPORT_SYMBOL(sk_wait_data);
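
/* Editorial sketch, not part of the original file: a blocking recvmsg()
 * loop calls sk_wait_data() with the socket locked, passing the last skb
 * it saw (or NULL) so the helper can tell whether new data arrived.
 */
static int example_wait_for_data(struct sock *sk, long *timeo)
{
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!*timeo)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(*timeo);
		sk_wait_data(sk, timeo, NULL);
	}
	return 0;
}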
2111
2112/**
2113 *      __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2114 *      @sk: socket
2115 *      @size: memory size to allocate
2116 *      @kind: allocation type
2117 *
2118 *      If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2119 *      rmem allocation. This function assumes that protocols which have
2120 *      memory_pressure use sk_wmem_queued as write buffer accounting.
2121 */
2122int __sk_mem_schedule(struct sock *sk, int size, int kind)
2123{
2124        struct proto *prot = sk->sk_prot;
2125        int amt = sk_mem_pages(size);
2126        long allocated;
2127
2128        sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2129
2130        allocated = sk_memory_allocated_add(sk, amt);
2131
2132        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2133            !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
2134                goto suppress_allocation;
2135
2136        /* Under limit. */
2137        if (allocated <= sk_prot_mem_limits(sk, 0)) {
2138                sk_leave_memory_pressure(sk);
2139                return 1;
2140        }
2141
2142        /* Under pressure. */
2143        if (allocated > sk_prot_mem_limits(sk, 1))
2144                sk_enter_memory_pressure(sk);
2145
2146        /* Over hard limit. */
2147        if (allocated > sk_prot_mem_limits(sk, 2))
2148                goto suppress_allocation;
2149
2150        /* guarantee minimum buffer size under pressure */
2151        if (kind == SK_MEM_RECV) {
2152                if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
2153                        return 1;
2154
2155        } else { /* SK_MEM_SEND */
2156                if (sk->sk_type == SOCK_STREAM) {
2157                        if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
2158                                return 1;
2159                } else if (atomic_read(&sk->sk_wmem_alloc) <
2160                           prot->sysctl_wmem[0])
2161                                return 1;
2162        }
2163
2164        if (sk_has_memory_pressure(sk)) {
2165                int alloc;
2166
2167                if (!sk_under_memory_pressure(sk))
2168                        return 1;
2169                alloc = sk_sockets_allocated_read_positive(sk);
2170                if (sk_prot_mem_limits(sk, 2) > alloc *
2171                    sk_mem_pages(sk->sk_wmem_queued +
2172                                 atomic_read(&sk->sk_rmem_alloc) +
2173                                 sk->sk_forward_alloc))
2174                        return 1;
2175        }
2176
2177suppress_allocation:
2178
2179        if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2180                sk_stream_moderate_sndbuf(sk);
2181
2182                /* Fail only if socket is _under_ its sndbuf.
2183                 * In this case we cannot block, so we have to fail.
2184                 */
2185                if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2186                        return 1;
2187        }
2188
2189        trace_sock_exceed_buf_limit(sk, prot, allocated);
2190
2191        /* Alas. Undo changes. */
2192        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
2193
2194        sk_memory_allocated_sub(sk, amt);
2195
2196        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2197                mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2198
2199        return 0;
2200}
2201EXPORT_SYMBOL(__sk_mem_schedule);
2202
2203/**
2204 *      __sk_mem_reclaim - reclaim memory_allocated
2205 *      @sk: socket
2206 *      @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2207 */
2208void __sk_mem_reclaim(struct sock *sk, int amount)
2209{
2210        amount >>= SK_MEM_QUANTUM_SHIFT;
2211        sk_memory_allocated_sub(sk, amount);
2212        sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2213
2214        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2215                mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2216
2217        if (sk_under_memory_pressure(sk) &&
2218            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2219                sk_leave_memory_pressure(sk);
2220}
2221EXPORT_SYMBOL(__sk_mem_reclaim);
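
/* Editorial sketch, not part of the original file: a receive path reserves
 * forward-allocated memory before accounting an skb.  On success the quantum
 * granted by __sk_mem_schedule() sits in sk_forward_alloc and is consumed
 * with sk_mem_charge(); unused quanta flow back through __sk_mem_reclaim()
 * (normally via the sk_mem_* wrappers in net/sock.h).
 */
static bool example_charge_rmem(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_forward_alloc < (int)skb->truesize &&
	    !__sk_mem_schedule(sk, skb->truesize, SK_MEM_RECV))
		return false;	/* over the protocol's memory limits */

	sk_mem_charge(sk, skb->truesize);
	return true;
}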
2222
2223int sk_set_peek_off(struct sock *sk, int val)
2224{
2225        if (val < 0)
2226                return -EINVAL;
2227
2228        sk->sk_peek_off = val;
2229        return 0;
2230}
2231EXPORT_SYMBOL_GPL(sk_set_peek_off);
2232
2233/*
2234 * Set of default routines for initialising struct proto_ops when
2235 * the protocol does not support a particular function. In certain
2236 * cases where it makes no sense for a protocol to have a "do nothing"
2237 * function, some default processing is provided.
2238 */
2239
2240int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2241{
2242        return -EOPNOTSUPP;
2243}
2244EXPORT_SYMBOL(sock_no_bind);
2245
2246int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2247                    int len, int flags)
2248{
2249        return -EOPNOTSUPP;
2250}
2251EXPORT_SYMBOL(sock_no_connect);
2252
2253int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2254{
2255        return -EOPNOTSUPP;
2256}
2257EXPORT_SYMBOL(sock_no_socketpair);
2258
2259int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
2260{
2261        return -EOPNOTSUPP;
2262}
2263EXPORT_SYMBOL(sock_no_accept);
2264
2265int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2266                    int *len, int peer)
2267{
2268        return -EOPNOTSUPP;
2269}
2270EXPORT_SYMBOL(sock_no_getname);
2271
2272unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
2273{
2274        return 0;
2275}
2276EXPORT_SYMBOL(sock_no_poll);
2277
2278int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2279{
2280        return -EOPNOTSUPP;
2281}
2282EXPORT_SYMBOL(sock_no_ioctl);
2283
2284int sock_no_listen(struct socket *sock, int backlog)
2285{
2286        return -EOPNOTSUPP;
2287}
2288EXPORT_SYMBOL(sock_no_listen);
2289
2290int sock_no_shutdown(struct socket *sock, int how)
2291{
2292        return -EOPNOTSUPP;
2293}
2294EXPORT_SYMBOL(sock_no_shutdown);
2295
2296int sock_no_setsockopt(struct socket *sock, int level, int optname,
2297                    char __user *optval, unsigned int optlen)
2298{
2299        return -EOPNOTSUPP;
2300}
2301EXPORT_SYMBOL(sock_no_setsockopt);
2302
2303int sock_no_getsockopt(struct socket *sock, int level, int optname,
2304                    char __user *optval, int __user *optlen)
2305{
2306        return -EOPNOTSUPP;
2307}
2308EXPORT_SYMBOL(sock_no_getsockopt);
2309
2310int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2311{
2312        return -EOPNOTSUPP;
2313}
2314EXPORT_SYMBOL(sock_no_sendmsg);
2315
2316int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2317                    int flags)
2318{
2319        return -EOPNOTSUPP;
2320}
2321EXPORT_SYMBOL(sock_no_recvmsg);
2322
2323int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2324{
2325        /* Mirror missing mmap method error code */
2326        return -ENODEV;
2327}
2328EXPORT_SYMBOL(sock_no_mmap);
2329
2330ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2331{
2332        ssize_t res;
2333        struct msghdr msg = {.msg_flags = flags};
2334        struct kvec iov;
2335        char *kaddr = kmap(page);
2336        iov.iov_base = kaddr + offset;
2337        iov.iov_len = size;
2338        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2339        kunmap(page);
2340        return res;
2341}
2342EXPORT_SYMBOL(sock_no_sendpage);
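
/* Editorial sketch, not part of the original file: a protocol wires the
 * operations it does not support to the sock_no_*() stubs above.  The
 * example_proto_ops table and the PF_UNSPEC family are placeholders only.
 */
static const struct proto_ops example_proto_ops = {
	.family		= PF_UNSPEC,
	.owner		= THIS_MODULE,
	.bind		= sock_no_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
	/* .release, .poll, .sendmsg, .recvmsg would point at real handlers */
};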
2343
2344/*
2345 *      Default Socket Callbacks
2346 */
2347
2348static void sock_def_wakeup(struct sock *sk)
2349{
2350        struct socket_wq *wq;
2351
2352        rcu_read_lock();
2353        wq = rcu_dereference(sk->sk_wq);
2354        if (skwq_has_sleeper(wq))
2355                wake_up_interruptible_all(&wq->wait);
2356        rcu_read_unlock();
2357}
2358
2359static void sock_def_error_report(struct sock *sk)
2360{
2361        struct socket_wq *wq;
2362
2363        rcu_read_lock();
2364        wq = rcu_dereference(sk->sk_wq);
2365        if (skwq_has_sleeper(wq))
2366                wake_up_interruptible_poll(&wq->wait, POLLERR);
2367        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2368        rcu_read_unlock();
2369}
2370
2371static void sock_def_readable(struct sock *sk)
2372{
2373        struct socket_wq *wq;
2374
2375        rcu_read_lock();
2376        wq = rcu_dereference(sk->sk_wq);
2377        if (skwq_has_sleeper(wq))
2378                wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
2379                                                POLLRDNORM | POLLRDBAND);
2380        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2381        rcu_read_unlock();
2382}
2383
2384static void sock_def_write_space(struct sock *sk)
2385{
2386        struct socket_wq *wq;
2387
2388        rcu_read_lock();
2389
2390        /* Do not wake up a writer until he can make "significant"
2391         * progress.  --DaveM
2392         */
2393        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2394                wq = rcu_dereference(sk->sk_wq);
2395                if (skwq_has_sleeper(wq))
2396                        wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
2397                                                POLLWRNORM | POLLWRBAND);
2398
2399                /* Should agree with poll, otherwise some programs break */
2400                if (sock_writeable(sk))
2401                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2402        }
2403
2404        rcu_read_unlock();
2405}
2406
2407static void sock_def_destruct(struct sock *sk)
2408{
2409}
2410
2411void sk_send_sigurg(struct sock *sk)
2412{
2413        if (sk->sk_socket && sk->sk_socket->file)
2414                if (send_sigurg(&sk->sk_socket->file->f_owner))
2415                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2416}
2417EXPORT_SYMBOL(sk_send_sigurg);
2418
2419void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2420                    unsigned long expires)
2421{
2422        if (!mod_timer(timer, expires))
2423                sock_hold(sk);
2424}
2425EXPORT_SYMBOL(sk_reset_timer);
2426
2427void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2428{
2429        if (del_timer(timer))
2430                __sock_put(sk);
2431}
2432EXPORT_SYMBOL(sk_stop_timer);
2433
2434void sock_init_data(struct socket *sock, struct sock *sk)
2435{
2436        skb_queue_head_init(&sk->sk_receive_queue);
2437        skb_queue_head_init(&sk->sk_write_queue);
2438        skb_queue_head_init(&sk->sk_error_queue);
2439
2440        sk->sk_send_head        =       NULL;
2441
2442        init_timer(&sk->sk_timer);
2443
2444        sk->sk_allocation       =       GFP_KERNEL;
2445        sk->sk_rcvbuf           =       sysctl_rmem_default;
2446        sk->sk_sndbuf           =       sysctl_wmem_default;
2447        sk->sk_state            =       TCP_CLOSE;
2448        sk_set_socket(sk, sock);
2449
2450        sock_set_flag(sk, SOCK_ZAPPED);
2451
2452        if (sock) {
2453                sk->sk_type     =       sock->type;
2454                sk->sk_wq       =       sock->wq;
2455                sock->sk        =       sk;
2456        } else
2457                sk->sk_wq       =       NULL;
2458
2459        rwlock_init(&sk->sk_callback_lock);
2460        lockdep_set_class_and_name(&sk->sk_callback_lock,
2461                        af_callback_keys + sk->sk_family,
2462                        af_family_clock_key_strings[sk->sk_family]);
2463
2464        sk->sk_state_change     =       sock_def_wakeup;
2465        sk->sk_data_ready       =       sock_def_readable;
2466        sk->sk_write_space      =       sock_def_write_space;
2467        sk->sk_error_report     =       sock_def_error_report;
2468        sk->sk_destruct         =       sock_def_destruct;
2469
2470        sk->sk_frag.page        =       NULL;
2471        sk->sk_frag.offset      =       0;
2472        sk->sk_peek_off         =       -1;
2473
2474        sk->sk_peer_pid         =       NULL;
2475        sk->sk_peer_cred        =       NULL;
2476        sk->sk_write_pending    =       0;
2477        sk->sk_rcvlowat         =       1;
2478        sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
2479        sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
2480
2481        sk->sk_stamp = ktime_set(-1L, 0);
2482
2483#ifdef CONFIG_NET_RX_BUSY_POLL
2484        sk->sk_napi_id          =       0;
2485        sk->sk_ll_usec          =       sysctl_net_busy_read;
2486#endif
2487
2488        sk->sk_max_pacing_rate = ~0U;
2489        sk->sk_pacing_rate = ~0U;
2490        sk->sk_incoming_cpu = -1;
2491        /*
2492         * Before updating sk_refcnt, we must commit prior changes to memory
2493         * (Documentation/RCU/rculist_nulls.txt for details)
2494         */
2495        smp_wmb();
2496        atomic_set(&sk->sk_refcnt, 1);
2497        atomic_set(&sk->sk_drops, 0);
2498}
2499EXPORT_SYMBOL(sock_init_data);
2500
2501void lock_sock_nested(struct sock *sk, int subclass)
2502{
2503        might_sleep();
2504        spin_lock_bh(&sk->sk_lock.slock);
2505        if (sk->sk_lock.owned)
2506                __lock_sock(sk);
2507        sk->sk_lock.owned = 1;
2508        spin_unlock(&sk->sk_lock.slock);
2509        /*
2510         * The sk_lock has mutex_lock() semantics here:
2511         */
2512        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
2513        local_bh_enable();
2514}
2515EXPORT_SYMBOL(lock_sock_nested);
2516
2517void release_sock(struct sock *sk)
2518{
2519        spin_lock_bh(&sk->sk_lock.slock);
2520        if (sk->sk_backlog.tail)
2521                __release_sock(sk);
2522
2523        /* Warning : release_cb() might need to release sk ownership,
2524         * ie call sock_release_ownership(sk) before us.
2525         */
2526        if (sk->sk_prot->release_cb)
2527                sk->sk_prot->release_cb(sk);
2528
2529        sock_release_ownership(sk);
2530        if (waitqueue_active(&sk->sk_lock.wq))
2531                wake_up(&sk->sk_lock.wq);
2532        spin_unlock_bh(&sk->sk_lock.slock);
2533}
2534EXPORT_SYMBOL(release_sock);
2535
2536/**
2537 * lock_sock_fast - fast version of lock_sock
2538 * @sk: socket
2539 *
2540 * This version should be used for very small sections, where the process won't block.
2541 * Returns false if the fast path is taken:
2542 *   sk_lock.slock locked, owned = 0, BH disabled
2543 * Returns true if the slow path is taken:
2544 *   sk_lock.slock unlocked, owned = 1, BH enabled
2545 */
2546bool lock_sock_fast(struct sock *sk)
2547{
2548        might_sleep();
2549        spin_lock_bh(&sk->sk_lock.slock);
2550
2551        if (!sk->sk_lock.owned)
2552                /*
2553                 * Note: the fast path returns with BH still disabled
2554                 */
2555                return false;
2556
2557        __lock_sock(sk);
2558        sk->sk_lock.owned = 1;
2559        spin_unlock(&sk->sk_lock.slock);
2560        /*
2561         * The sk_lock has mutex_lock() semantics here:
2562         */
2563        mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
2564        local_bh_enable();
2565        return true;
2566}
2567EXPORT_SYMBOL(lock_sock_fast);
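
/* Editorial sketch, not part of the original file: the boolean returned by
 * lock_sock_fast() must be handed back to unlock_sock_fast() (a helper in
 * net/sock.h) so that the matching unlock path is used.
 */
static void example_short_locked_section(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... very small, non-blocking critical section ... */

	unlock_sock_fast(sk, slow);
}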
2568
2569int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2570{
2571        struct timeval tv;
2572        if (!sock_flag(sk, SOCK_TIMESTAMP))
2573                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2574        tv = ktime_to_timeval(sk->sk_stamp);
2575        if (tv.tv_sec == -1)
2576                return -ENOENT;
2577        if (tv.tv_sec == 0) {
2578                sk->sk_stamp = ktime_get_real();
2579                tv = ktime_to_timeval(sk->sk_stamp);
2580        }
2581        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2582}
2583EXPORT_SYMBOL(sock_get_timestamp);
2584
2585int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
2586{
2587        struct timespec ts;
2588        if (!sock_flag(sk, SOCK_TIMESTAMP))
2589                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2590        ts = ktime_to_timespec(sk->sk_stamp);
2591        if (ts.tv_sec == -1)
2592                return -ENOENT;
2593        if (ts.tv_sec == 0) {
2594                sk->sk_stamp = ktime_get_real();
2595                ts = ktime_to_timespec(sk->sk_stamp);
2596        }
2597        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
2598}
2599EXPORT_SYMBOL(sock_get_timestampns);
2600
2601void sock_enable_timestamp(struct sock *sk, int flag)
2602{
2603        if (!sock_flag(sk, flag)) {
2604                unsigned long previous_flags = sk->sk_flags;
2605
2606                sock_set_flag(sk, flag);
2607                /*
2608                 * we just set one of the two flags which require net
2609                 * time stamping, but time stamping might have been on
2610                 * already because of the other one
2611                 */
2612                if (sock_needs_netstamp(sk) &&
2613                    !(previous_flags & SK_FLAGS_TIMESTAMP))
2614                        net_enable_timestamp();
2615        }
2616}
2617
2618int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
2619                       int level, int type)
2620{
2621        struct sock_exterr_skb *serr;
2622        struct sk_buff *skb;
2623        int copied, err;
2624
2625        err = -EAGAIN;
2626        skb = sock_dequeue_err_skb(sk);
2627        if (skb == NULL)
2628                goto out;
2629
2630        copied = skb->len;
2631        if (copied > len) {
2632                msg->msg_flags |= MSG_TRUNC;
2633                copied = len;
2634        }
2635        err = skb_copy_datagram_msg(skb, 0, msg, copied);
2636        if (err)
2637                goto out_free_skb;
2638
2639        sock_recv_timestamp(msg, sk, skb);
2640
2641        serr = SKB_EXT_ERR(skb);
2642        put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
2643
2644        msg->msg_flags |= MSG_ERRQUEUE;
2645        err = copied;
2646
2647out_free_skb:
2648        kfree_skb(skb);
2649out:
2650        return err;
2651}
2652EXPORT_SYMBOL(sock_recv_errqueue);
2653
2654/*
2655 *      Get a socket option on a socket.
2656 *
2657 *      FIX: POSIX 1003.1g is very ambiguous here. It states that
2658 *      asynchronous errors should be reported by getsockopt. We assume
2659 *      this means if you specify SO_ERROR (otherwise what's the point of it).
2660 */
2661int sock_common_getsockopt(struct socket *sock, int level, int optname,
2662                           char __user *optval, int __user *optlen)
2663{
2664        struct sock *sk = sock->sk;
2665
2666        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2667}
2668EXPORT_SYMBOL(sock_common_getsockopt);
2669
2670#ifdef CONFIG_COMPAT
2671int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
2672                                  char __user *optval, int __user *optlen)
2673{
2674        struct sock *sk = sock->sk;
2675
2676        if (sk->sk_prot->compat_getsockopt != NULL)
2677                return sk->sk_prot->compat_getsockopt(sk, level, optname,
2678                                                      optval, optlen);
2679        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
2680}
2681EXPORT_SYMBOL(compat_sock_common_getsockopt);
2682#endif
2683
2684int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2685                        int flags)
2686{
2687        struct sock *sk = sock->sk;
2688        int addr_len = 0;
2689        int err;
2690
2691        err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
2692                                   flags & ~MSG_DONTWAIT, &addr_len);
2693        if (err >= 0)
2694                msg->msg_namelen = addr_len;
2695        return err;
2696}
2697EXPORT_SYMBOL(sock_common_recvmsg);
2698
2699/*
2700 *      Set socket options on an inet socket.
2701 */
2702int sock_common_setsockopt(struct socket *sock, int level, int optname,
2703                           char __user *optval, unsigned int optlen)
2704{
2705        struct sock *sk = sock->sk;
2706
2707        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2708}
2709EXPORT_SYMBOL(sock_common_setsockopt);
2710
2711#ifdef CONFIG_COMPAT
2712int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
2713                                  char __user *optval, unsigned int optlen)
2714{
2715        struct sock *sk = sock->sk;
2716
2717        if (sk->sk_prot->compat_setsockopt != NULL)
2718                return sk->sk_prot->compat_setsockopt(sk, level, optname,
2719                                                      optval, optlen);
2720        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
2721}
2722EXPORT_SYMBOL(compat_sock_common_setsockopt);
2723#endif
2724
2725void sk_common_release(struct sock *sk)
2726{
2727        if (sk->sk_prot->destroy)
2728                sk->sk_prot->destroy(sk);
2729
2730        /*
2731         * Observation: when sk_common_release() is called, processes have
2732         * no access to the socket, but the network stack still does.
2733         * Step one, detach it from networking:
2734         *
2735         * A. Remove from hash tables.
2736         */
2737
2738        sk->sk_prot->unhash(sk);
2739
2740        /*
2741         * At this point the socket cannot receive new packets, but it is
2742         * possible that some packets are in flight because some CPU is running
2743         * the receiver and did a hash table lookup before we unhashed the socket.
2744         * They will reach the receive queue and be purged by the socket destructor.
2745         *
2746         * Also we still have packets pending on the receive queue and probably
2747         * our own packets waiting in device queues. sock_destroy will drain the
2748         * receive queue, but transmitted packets will delay socket destruction
2749         * until the last reference is released.
2750         */
2751
2752        sock_orphan(sk);
2753
2754        xfrm_sk_free_policy(sk);
2755
2756        sk_refcnt_debug_release(sk);
2757
2758        if (sk->sk_frag.page) {
2759                put_page(sk->sk_frag.page);
2760                sk->sk_frag.page = NULL;
2761        }
2762
2763        sock_put(sk);
2764}
2765EXPORT_SYMBOL(sk_common_release);
2766
2767#ifdef CONFIG_PROC_FS
2768#define PROTO_INUSE_NR  64      /* should be enough for the first time */
2769struct prot_inuse {
2770        int val[PROTO_INUSE_NR];
2771};
2772
2773static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2774
2775#ifdef CONFIG_NET_NS
2776void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2777{
2778        __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2779}
2780EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2781
2782int sock_prot_inuse_get(struct net *net, struct proto *prot)
2783{
2784        int cpu, idx = prot->inuse_idx;
2785        int res = 0;
2786
2787        for_each_possible_cpu(cpu)
2788                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
2789
2790        return res >= 0 ? res : 0;
2791}
2792EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2793
2794static int __net_init sock_inuse_init_net(struct net *net)
2795{
2796        net->core.inuse = alloc_percpu(struct prot_inuse);
2797        return net->core.inuse ? 0 : -ENOMEM;
2798}
2799
2800static void __net_exit sock_inuse_exit_net(struct net *net)
2801{
2802        free_percpu(net->core.inuse);
2803}
2804
2805static struct pernet_operations net_inuse_ops = {
2806        .init = sock_inuse_init_net,
2807        .exit = sock_inuse_exit_net,
2808};
2809
2810static __init int net_inuse_init(void)
2811{
2812        if (register_pernet_subsys(&net_inuse_ops))
2813                panic("Cannot initialize net inuse counters");
2814
2815        return 0;
2816}
2817
2818core_initcall(net_inuse_init);
2819#else
2820static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2821
2822void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2823{
2824        __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2825}
2826EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2827
2828int sock_prot_inuse_get(struct net *net, struct proto *prot)
2829{
2830        int cpu, idx = prot->inuse_idx;
2831        int res = 0;
2832
2833        for_each_possible_cpu(cpu)
2834                res += per_cpu(prot_inuse, cpu).val[idx];
2835
2836        return res >= 0 ? res : 0;
2837}
2838EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
2839#endif
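
/* Editorial sketch, not part of the original file: protocols bump the
 * per-cpu in-use counter when a socket is hashed and drop it again on
 * unhash; this is what feeds the "sockets" column of /proc/net/protocols.
 * The example_* hash callbacks are hypothetical.
 */
static void example_hash(struct sock *sk)
{
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}

static void example_unhash(struct sock *sk)
{
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}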
2840
2841static void assign_proto_idx(struct proto *prot)
2842{
2843        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
2844
2845        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
2846                pr_err("PROTO_INUSE_NR exhausted\n");
2847                return;
2848        }
2849
2850        set_bit(prot->inuse_idx, proto_inuse_idx);
2851}
2852
2853static void release_proto_idx(struct proto *prot)
2854{
2855        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
2856                clear_bit(prot->inuse_idx, proto_inuse_idx);
2857}
2858#else
2859static inline void assign_proto_idx(struct proto *prot)
2860{
2861}
2862
2863static inline void release_proto_idx(struct proto *prot)
2864{
2865}
2866#endif
2867
2868static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
2869{
2870        if (!rsk_prot)
2871                return;
2872        kfree(rsk_prot->slab_name);
2873        rsk_prot->slab_name = NULL;
2874        kmem_cache_destroy(rsk_prot->slab);
2875        rsk_prot->slab = NULL;
2876}
2877
2878static int req_prot_init(const struct proto *prot)
2879{
2880        struct request_sock_ops *rsk_prot = prot->rsk_prot;
2881
2882        if (!rsk_prot)
2883                return 0;
2884
2885        rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
2886                                        prot->name);
2887        if (!rsk_prot->slab_name)
2888                return -ENOMEM;
2889
2890        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
2891                                           rsk_prot->obj_size, 0,
2892                                           prot->slab_flags, NULL);
2893
2894        if (!rsk_prot->slab) {
2895                pr_crit("%s: Can't create request sock SLAB cache!\n",
2896                        prot->name);
2897                return -ENOMEM;
2898        }
2899        return 0;
2900}
2901
2902int proto_register(struct proto *prot, int alloc_slab)
2903{
2904        if (alloc_slab) {
2905                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
2906                                        SLAB_HWCACHE_ALIGN | prot->slab_flags,
2907                                        NULL);
2908
2909                if (prot->slab == NULL) {
2910                        pr_crit("%s: Can't create sock SLAB cache!\n",
2911                                prot->name);
2912                        goto out;
2913                }
2914
2915                if (req_prot_init(prot))
2916                        goto out_free_request_sock_slab;
2917
2918                if (prot->twsk_prot != NULL) {
2919                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
2920
2921                        if (prot->twsk_prot->twsk_slab_name == NULL)
2922                                goto out_free_request_sock_slab;
2923
2924                        prot->twsk_prot->twsk_slab =
2925                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
2926                                                  prot->twsk_prot->twsk_obj_size,
2927                                                  0,
2928                                                  prot->slab_flags,
2929                                                  NULL);
2930                        if (prot->twsk_prot->twsk_slab == NULL)
2931                                goto out_free_timewait_sock_slab_name;
2932                }
2933        }
2934
2935        mutex_lock(&proto_list_mutex);
2936        list_add(&prot->node, &proto_list);
2937        assign_proto_idx(prot);
2938        mutex_unlock(&proto_list_mutex);
2939        return 0;
2940
2941out_free_timewait_sock_slab_name:
2942        kfree(prot->twsk_prot->twsk_slab_name);
2943out_free_request_sock_slab:
2944        req_prot_cleanup(prot->rsk_prot);
2945
2946        kmem_cache_destroy(prot->slab);
2947        prot->slab = NULL;
2948out:
2949        return -ENOBUFS;
2950}
2951EXPORT_SYMBOL(proto_register);
2952
2953void proto_unregister(struct proto *prot)
2954{
2955        mutex_lock(&proto_list_mutex);
2956        release_proto_idx(prot);
2957        list_del(&prot->node);
2958        mutex_unlock(&proto_list_mutex);
2959
2960        kmem_cache_destroy(prot->slab);
2961        prot->slab = NULL;
2962
2963        req_prot_cleanup(prot->rsk_prot);
2964
2965        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
2966                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
2967                kfree(prot->twsk_prot->twsk_slab_name);
2968                prot->twsk_prot->twsk_slab = NULL;
2969        }
2970}
2971EXPORT_SYMBOL(proto_unregister);
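
/* Editorial sketch, not part of the original file: a protocol module pairs
 * proto_register() at load time with proto_unregister() at unload; the
 * example_prot structure is a hypothetical minimal proto.
 */
static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_proto_init(void)
{
	return proto_register(&example_prot, 1);	/* 1: allocate a slab cache */
}

static void __exit example_proto_exit(void)
{
	proto_unregister(&example_prot);
}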
2972
2973#ifdef CONFIG_PROC_FS
2974static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
2975        __acquires(proto_list_mutex)
2976{
2977        mutex_lock(&proto_list_mutex);
2978        return seq_list_start_head(&proto_list, *pos);
2979}
2980
2981static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2982{
2983        return seq_list_next(v, &proto_list, pos);
2984}
2985
2986static void proto_seq_stop(struct seq_file *seq, void *v)
2987        __releases(proto_list_mutex)
2988{
2989        mutex_unlock(&proto_list_mutex);
2990}
2991
2992static char proto_method_implemented(const void *method)
2993{
2994        return method == NULL ? 'n' : 'y';
2995}
2996static long sock_prot_memory_allocated(struct proto *proto)
2997{
2998        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2999}
3000
3001static char *sock_prot_memory_pressure(struct proto *proto)
3002{
3003        return proto->memory_pressure != NULL ?
3004        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3005}
3006
3007static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3008{
3009
3010        seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
3011                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3012                   proto->name,
3013                   proto->obj_size,
3014                   sock_prot_inuse_get(seq_file_net(seq), proto),
3015                   sock_prot_memory_allocated(proto),
3016                   sock_prot_memory_pressure(proto),
3017                   proto->max_header,
3018                   proto->slab == NULL ? "no" : "yes",
3019                   module_name(proto->owner),
3020                   proto_method_implemented(proto->close),
3021                   proto_method_implemented(proto->connect),
3022                   proto_method_implemented(proto->disconnect),
3023                   proto_method_implemented(proto->accept),
3024                   proto_method_implemented(proto->ioctl),
3025                   proto_method_implemented(proto->init),
3026                   proto_method_implemented(proto->destroy),
3027                   proto_method_implemented(proto->shutdown),
3028                   proto_method_implemented(proto->setsockopt),
3029                   proto_method_implemented(proto->getsockopt),
3030                   proto_method_implemented(proto->sendmsg),
3031                   proto_method_implemented(proto->recvmsg),
3032                   proto_method_implemented(proto->sendpage),
3033                   proto_method_implemented(proto->bind),
3034                   proto_method_implemented(proto->backlog_rcv),
3035                   proto_method_implemented(proto->hash),
3036                   proto_method_implemented(proto->unhash),
3037                   proto_method_implemented(proto->get_port),
3038                   proto_method_implemented(proto->enter_memory_pressure));
3039}
3040
3041static int proto_seq_show(struct seq_file *seq, void *v)
3042{
3043        if (v == &proto_list)
3044                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3045                           "protocol",
3046                           "size",
3047                           "sockets",
3048                           "memory",
3049                           "press",
3050                           "maxhdr",
3051                           "slab",
3052                           "module",
3053                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3054        else
3055                proto_seq_printf(seq, list_entry(v, struct proto, node));
3056        return 0;
3057}
3058
3059static const struct seq_operations proto_seq_ops = {
3060        .start  = proto_seq_start,
3061        .next   = proto_seq_next,
3062        .stop   = proto_seq_stop,
3063        .show   = proto_seq_show,
3064};
3065
3066static int proto_seq_open(struct inode *inode, struct file *file)
3067{
3068        return seq_open_net(inode, file, &proto_seq_ops,
3069                            sizeof(struct seq_net_private));
3070}
3071
3072static const struct file_operations proto_seq_fops = {
3073        .owner          = THIS_MODULE,
3074        .open           = proto_seq_open,
3075        .read           = seq_read,
3076        .llseek         = seq_lseek,
3077        .release        = seq_release_net,
3078};
3079
3080static __net_init int proto_init_net(struct net *net)
3081{
3082        if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops))
3083                return -ENOMEM;
3084
3085        return 0;
3086}
3087
3088static __net_exit void proto_exit_net(struct net *net)
3089{
3090        remove_proc_entry("protocols", net->proc_net);
3091}
3092
3093
3094static __net_initdata struct pernet_operations proto_net_ops = {
3095        .init = proto_init_net,
3096        .exit = proto_exit_net,
3097};
3098
3099static int __init proto_init(void)
3100{
3101        return register_pernet_subsys(&proto_net_ops);
3102}
3103
3104subsys_initcall(proto_init);
3105
3106#endif /* PROC_FS */
3107