linux/net/ipv4/inet_connection_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Support for INET connection oriented protocols.
 *
 * Authors:     See the TCP sources
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
        .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
        .range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

void inet_get_local_port_range(int *low, int *high)
{
        unsigned seq;
        do {
                seq = read_seqbegin(&sysctl_local_ports.lock);

                *low = sysctl_local_ports.range[0];
                *high = sysctl_local_ports.range[1];
        } while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

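/*
 * Check whether binding @sk to the port represented by @tb would clash
 * with a socket already bound there.  In effect, two sockets may share
 * the port only if both set SO_REUSEADDR and the existing one is not
 * listening, or if they are bound to different devices or to different,
 * non-wildcard local addresses.  Returns non-zero on conflict.
 */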
int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb)
{
        struct sock *sk2;
        struct hlist_node *node;
        int reuse = sk->sk_reuse;

        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the socks listed
         * in tb->owners list belong to the same net - the
         * one this bucket belongs to.
         */

        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) {
                                const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                                    sk2_rcv_saddr == sk_rcv_saddr(sk))
                                        break;
                        }
                }
        }
        return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, any available local port is selected.
 */
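/*
 * A sketch of the search: with snum == 0 we start from a random port in
 * the sysctl'd local range and probe each port in turn, skipping
 * reserved ports, until a bucket with no bind conflict is found.  While
 * probing, the least-owned bucket whose owners all allow reuse
 * (tb->fastreuse) is remembered, and we fall back to it if the range is
 * exhausted or the bind hash is already crowded.
 */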
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_bind_hashbucket *head;
        struct hlist_node *node;
        struct inet_bind_bucket *tb;
        int ret, attempts = 5;
        struct net *net = sock_net(sk);
        int smallest_size = -1, smallest_rover;

        local_bh_disable();
        if (!snum) {
                int remaining, rover, low, high;

again:
                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;
                smallest_rover = rover = net_random() % remaining + low;

                smallest_size = -1;
                do {
                        if (inet_is_reserved_local_port(rover))
                                goto next_nolock;
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
                        spin_lock(&head->lock);
                        inet_bind_bucket_for_each(tb, node, &head->chain)
                                if (net_eq(ib_net(tb), net) && tb->port == rover) {
                                        if (tb->fastreuse > 0 &&
                                            sk->sk_reuse &&
                                            sk->sk_state != TCP_LISTEN &&
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
                                                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
                                                        snum = smallest_rover;
                                                        goto tb_found;
                                                }
                                        }
                                        if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
                                                snum = rover;
                                                goto tb_found;
                                        }
                                        goto next;
                                }
                        break;
                next:
                        spin_unlock(&head->lock);
                next_nolock:
                        if (++rover > high)
                                rover = low;
                } while (--remaining > 0);

                /* Exhausted local port range during search?  It is not
                 * possible for us to be holding one of the bind hash
                 * locks if this test triggers, because if 'remaining'
                 * drops to zero, we broke out of the do/while loop at
                 * the top level, not from the 'break;' statement.
                 */
                ret = 1;
                if (remaining <= 0) {
                        if (smallest_size != -1) {
                                snum = smallest_rover;
                                goto have_snum;
                        }
                        goto fail;
                }
                /* OK, here is the one we will use.  HEAD is
                 * non-NULL and we hold its lock.
                 */
                snum = rover;
        } else {
have_snum:
                head = &hashinfo->bhash[inet_bhashfn(net, snum,
                                hashinfo->bhash_size)];
                spin_lock(&head->lock);
                inet_bind_bucket_for_each(tb, node, &head->chain)
                        if (net_eq(ib_net(tb), net) && tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (!hlist_empty(&tb->owners)) {
                if (tb->fastreuse > 0 &&
                    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
                    smallest_size == -1) {
                        goto success;
                } else {
                        ret = 1;
                        if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
                                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
                                    smallest_size != -1 && --attempts >= 0) {
                                        spin_unlock(&head->lock);
                                        goto again;
                                }
                                goto fail_unlock;
                        }
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
                                        net, head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
        } else if (tb->fastreuse &&
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;
success:
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, snum);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;

        /*
         * True wake-one mechanism for incoming connections: only
         * one process gets woken up, not the 'whole herd'.
         * Since we do not 'race & poll' for established sockets
         * anymore, the common case will execute the loop only once.
         *
         * Subtle issue: "add_wait_queue_exclusive()" appends us
         * after any current non-exclusive waiters, and we know that
         * we will always _stay_ after any new non-exclusive waiters
         * because all non-exclusive waiters are added at the
         * beginning of the wait-queue. As such, it's ok to "drop"
         * our exclusiveness temporarily when we get woken up without
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
}

/*
 * This will accept the next outstanding connection.
 */
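/*
 * Returns the child socket, or NULL with *err set: -EINVAL if @sk is
 * not listening, -EAGAIN if no connection is pending and the socket is
 * non-blocking (or the receive timeout expires), or -EINTR/-ERESTARTSYS
 * via sock_intr_errno() when a signal interrupts the wait.
 */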
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *newsk;
        int error;

        lock_sock(sk);

        /* We need to make sure that this socket is listening,
         * and that it has something pending.
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out_err;

        /* Find already established connection */
        if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

                /* If this is a non-blocking socket, don't sleep */
                error = -EAGAIN;
                if (!timeo)
                        goto out_err;

                error = inet_csk_wait_for_connect(sk, timeo);
                if (error)
                        goto out_err;
        }

        newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
        WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
        release_sock(sk);
        return newsk;
out_err:
        newsk = NULL;
        *err = error;
        goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * We use separate timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
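/*
 * The three handlers map onto three separate timers: retransmit on
 * icsk_retransmit_timer, delayed ACK on icsk_delack_timer, and
 * keepalive on the generic sk->sk_timer.  Each handler receives the
 * socket pointer as its argument.
 */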
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(unsigned long),
                               void (*delack_handler)(unsigned long),
                               void (*keepalive_handler)(unsigned long))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
                        (unsigned long)sk);
        setup_timer(&icsk->icsk_delack_timer, delack_handler,
                        (unsigned long)sk);
        setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

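/*
 * Build and validate a route for the reply to a connection request.
 * The flow is keyed by the addresses and ports recorded in @req,
 * except that a source-route option redirects the destination to the
 * first hop (opt->opt.faddr); a strict source route that disagrees
 * with the chosen gateway fails the lookup.  Returns the route's dst,
 * or NULL on failure (accounted in IPSTATS_MIB_OUTNOROUTES).
 */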
struct dst_entry *inet_csk_route_req(struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
{
        struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct ip_options_rcu *opt = inet_rsk(req)->opt;
        struct net *net = sock_net(sk);

        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                           ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
                goto route_err;
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

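/*
 * Same routing logic as inet_csk_route_req() above, but for the newly
 * cloned child socket: the flow is composed directly in the child's
 * inet cork (newinet->cork.fl.u.ip4), so it stays available to the
 * child for later output.
 */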
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt = ireq->opt;
        struct net *net = sock_net(sk);
        struct flowi4 *fl4;
        struct rtable *rt;

        fl4 = &newinet->cork.fl.u.ip4;
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                           ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
                goto route_err;
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

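/*
 * Hash a (remote address, remote port) pair into a listener's SYN
 * queue.  jhash_2words() is seeded with the per-listener hash_rnd, and
 * the final mask is valid because synq_hsize is always a power of two
 * (reqsk_queue_alloc() rounds the table size up).
 */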
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
                                 const u32 rnd, const u32 synq_hsize)
{
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

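/*
 * Look up a pending connection request in @sk's SYN queue by remote
 * port, remote address and local address.  On a hit, *prevp is set to
 * the location of the pointer to the request, so the caller can unlink
 * it from the singly-linked hash chain.
 */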
struct request_sock *inet_csk_search_req(const struct sock *sk,
                                         struct request_sock ***prevp,
                                         const __be16 rport, const __be32 raddr,
                                         const __be32 laddr)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req, **prev;

        for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                                    lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);

                if (ireq->rmt_port == rport &&
                    ireq->rmt_addr == raddr &&
                    ireq->loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
                        WARN_ON(req->sk);
                        *prevp = prev;
                        break;
                }
        }

        return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);

        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
        inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

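/*
 * With TCP_DEFER_ACCEPT (rskq_defer_accept != 0) a request that has
 * been ACKed but has carried no data yet is deliberately kept around:
 * it only expires once both the SYN-ACK threshold and the defer-accept
 * limit (max_retries) have been exceeded, and SYN-ACK retransmission
 * is suppressed until the deferring period is nearly over.
 */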
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                                  const int max_retries,
                                  const u8 rskq_defer_accept,
                                  int *expire, int *resend)
{
        if (!rskq_defer_accept) {
                *expire = req->retrans >= thresh;
                *resend = 1;
                return;
        }
        *expire = req->retrans >= thresh &&
                  (!inet_rsk(req)->acked || req->retrans >= max_retries);
        /*
         * Do not resend while waiting for data after ACK;
         * start resending at the end of the deferring period,
         * to give the data or ACK one last chance to create
         * an established socket.
         */
        *resend = !inet_rsk(req)->acked ||
                  req->retrans >= rskq_defer_accept - 1;
}

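/*
 * Scan a budgeted slice of the SYN hash table per invocation, resuming
 * at clock_hand next time.  Requests whose timer has expired get their
 * SYN-ACK retransmitted with exponential backoff (the next interval is
 * min(timeout << retrans, max_rto), i.e. it doubles per retry), while
 * requests past their threshold are unlinked and freed.  The parent's
 * keepalive timer is re-armed at the end while requests remain queued.
 */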
void inet_csk_reqsk_queue_prune(struct sock *parent,
                                const unsigned long interval,
                                const unsigned long timeout,
                                const unsigned long max_rto)
{
        struct inet_connection_sock *icsk = inet_csk(parent);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct listen_sock *lopt = queue->listen_opt;
        int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
        int thresh = max_retries;
        unsigned long now = jiffies;
        struct request_sock **reqp, *req;
        int i, budget;

        if (lopt == NULL || lopt->qlen == 0)
                return;

        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established sockets) within the first
         * timeout.  If a SYN-ACK was not acknowledged for 3 seconds,
         * it means one of the following things: the SYN-ACK was lost,
         * the ACK was lost, rtt is high or nobody planned to ack
         * (i.e. a synflood).
         * When a server is a bit loaded, the queue is populated with
         * old open requests, reducing the effective size of the queue.
         * When a server is well loaded, the queue size reduces to zero
         * after several minutes of work.  This is not a synflood,
         * it is normal operation.  The solution is to prune entries
         * that are too old, overriding the normal timeout, when the
         * situation becomes dangerous.
         *
         * Essentially, we reserve half of the room for young
         * embryos; and abort old ones without pity, if old
         * ones are about to clog our table.
         */
        if (lopt->qlen>>(lopt->max_qlen_log-1)) {
                int young = (lopt->qlen_young<<1);

                while (thresh > 2) {
                        if (lopt->qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }

        if (queue->rskq_defer_accept)
                max_retries = queue->rskq_defer_accept;

        budget = 2 * (lopt->nr_table_entries / (timeout / interval));
        i = lopt->clock_hand;

        do {
                reqp = &lopt->syn_table[i];
                while ((req = *reqp) != NULL) {
                        if (time_after_eq(now, req->expires)) {
                                int expire = 0, resend = 0;

                                syn_ack_recalc(req, thresh, max_retries,
                                               queue->rskq_defer_accept,
                                               &expire, &resend);
                                if (req->rsk_ops->syn_ack_timeout)
                                        req->rsk_ops->syn_ack_timeout(parent, req);
                                if (!expire &&
                                    (!resend ||
                                     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
                                     inet_rsk(req)->acked)) {
                                        unsigned long timeo;

                                        if (req->retrans++ == 0)
                                                lopt->qlen_young--;
                                        timeo = min((timeout << req->retrans), max_rto);
                                        req->expires = now + timeo;
                                        reqp = &req->dl_next;
                                        continue;
                                }

                                /* Drop this request */
                                inet_csk_reqsk_queue_unlink(parent, req, reqp);
                                reqsk_queue_removed(queue, req);
                                reqsk_free(req);
                                continue;
                        }
                        reqp = &req->dl_next;
                }

                i = (i + 1) & (lopt->nr_table_entries - 1);

        } while (--budget > 0);

        lopt->clock_hand = i;

        if (lopt->qlen)
                inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

/**
 *      inet_csk_clone_lock - clone an inet socket, and lock its clone
 *      @sk: the socket to clone
 *      @req: request_sock
 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *      Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority)
{
        struct sock *newsk = sk_clone_lock(sk, priority);

        if (newsk != NULL) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);

                newsk->sk_state = TCP_SYN_RECV;
                newicsk->icsk_bind_hash = NULL;

                inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
                inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
                inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
                newsk->sk_write_space = sk_stream_write_space;

                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;

                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

                security_inet_csk_clone(newsk, req);
        }
        return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
        WARN_ON(sk->sk_state != TCP_CLOSE);
        WARN_ON(!sock_flag(sk, SOCK_DEAD));

        /* It cannot be in the hash table! */
        WARN_ON(!sk_unhashed(sk));

        /* If inet_sk(sk)->inet_num is nonzero, the socket must be bound */
        WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

        sk->sk_prot->destroy(sk);

        sk_stream_kill_queues(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        percpu_counter_dec(sk->sk_prot->orphan_count);
        sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

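/*
 * Switch @sk into the listening state: allocate the accept queue and
 * SYN table, flip sk_state to TCP_LISTEN, and let get_port() re-check
 * the local port (a port shared via SO_REUSEADDR must not be shared
 * once one of its owners listens).  On success the socket is hashed
 * into the listening table; otherwise the state is rolled back to
 * TCP_CLOSE and -EADDRINUSE is returned.
 */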
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

        if (rc != 0)
                return rc;

        sk->sk_max_ack_backlog = 0;
        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);

        /* There is a race window here: we announce ourselves listening,
         * but this transition is still not validated by get_port().
         * It is OK, because this socket enters the hash table only
         * after validation is complete.
         */
        sk->sk_state = TCP_LISTEN;
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);

                sk_dst_reset(sk);
                sk->sk_prot->hash(sk);

                return 0;
        }

        sk->sk_state = TCP_CLOSE;
        __reqsk_queue_destroy(&icsk->icsk_accept_queue);
        return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *      This routine closes sockets which have been at least partially
 *      opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock *acc_req;
        struct request_sock *req;

        inet_csk_delete_keepalive_timer(sk);

        /* make all the listen_opt local to us */
        acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

        /* Following the specs, it would be better either to send a FIN
         * (and enter FIN-WAIT-1, as in a normal close)
         * or to send an active reset (abort).
         * Certainly, it is pretty dangerous during a synflood, but that
         * is a bad justification for our negligence 8)
         * To be honest, we are not able to make either
         * of the variants now.                 --ANK
         */
        reqsk_queue_destroy(&icsk->icsk_accept_queue);

        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;

                acc_req = req->dl_next;

                local_bh_disable();
                bh_lock_sock(child);
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);

                sk->sk_prot->disconnect(child, O_NONBLOCK);

                sock_orphan(child);

                percpu_counter_inc(sk->sk_prot->orphan_count);

                inet_csk_destroy_sock(child);

                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);

                sk_acceptq_removed(sk);
                __reqsk_free(req);
        }
        WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

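/*
 * Report the connection's peer: copy the remote IPv4 address and port
 * out of the socket into an AF_INET sockaddr.
 */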
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        const struct inet_sock *inet = inet_sk(sk);

        sin->sin_family         = AF_INET;
        sin->sin_addr.s_addr    = inet->inet_daddr;
        sin->sin_port           = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_getsockopt != NULL)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, unsigned int optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_setsockopt != NULL)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif