linux/net/ipv4/inet_connection_sock.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Support for INET connection oriented protocols.
 *
 * Authors:     See the TCP sources
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
        .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
        .range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

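/*
 * Read the current ephemeral port range under the seqlock so that a
 * concurrent sysctl update of sysctl_local_ports is never seen half-way.
 */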
void inet_get_local_port_range(int *low, int *high)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&sysctl_local_ports.lock);

                *low = sysctl_local_ports.range[0];
                *high = sysctl_local_ports.range[1];
        } while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

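/*
 * Walk the sockets already bound to this port (tb->owners) and report
 * whether binding @sk as well would conflict, taking SO_REUSEADDR, the
 * bound device and the bound local address into account.  Returns
 * non-zero if a conflicting socket was found.
 */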
int inet_csk_bind_conflict(const struct sock *sk,
                           const struct inet_bind_bucket *tb, bool relax)
{
        struct sock *sk2;
        struct hlist_node *node;
        int reuse = sk->sk_reuse;

        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the sockets listed
         * in the tb->owners list belong to the same net - the
         * one this bucket belongs to.
         */

        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if (!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) {
                                const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                                if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                                    sk2_rcv_saddr == sk_rcv_saddr(sk))
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {
                                const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

                                if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                                    sk2_rcv_saddr == sk_rcv_saddr(sk))
                                        break;
                        }
                }
        }
        return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_bind_hashbucket *head;
        struct hlist_node *node;
        struct inet_bind_bucket *tb;
        int ret, attempts = 5;
        struct net *net = sock_net(sk);
        int smallest_size = -1, smallest_rover;

        local_bh_disable();
        if (!snum) {
                int remaining, rover, low, high;

again:
                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;
                smallest_rover = rover = net_random() % remaining + low;

                smallest_size = -1;
                do {
                        if (inet_is_reserved_local_port(rover))
                                goto next_nolock;
                        head = &hashinfo->bhash[inet_bhashfn(net, rover,
                                        hashinfo->bhash_size)];
                        spin_lock(&head->lock);
                        inet_bind_bucket_for_each(tb, node, &head->chain)
                                if (net_eq(ib_net(tb), net) && tb->port == rover) {
                                        if (tb->fastreuse > 0 &&
                                            sk->sk_reuse &&
                                            sk->sk_state != TCP_LISTEN &&
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
                                                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
                                                    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
                                                        snum = smallest_rover;
                                                        goto tb_found;
                                                }
                                        }
                                        if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
                                                snum = rover;
                                                goto tb_found;
                                        }
                                        goto next;
                                }
                        break;
                next:
                        spin_unlock(&head->lock);
                next_nolock:
                        if (++rover > high)
                                rover = low;
                } while (--remaining > 0);

                /* Exhausted local port range during search?  It is not
                 * possible for us to be holding one of the bind hash
                 * locks if this test triggers, because if 'remaining'
                 * drops to zero, we broke out of the do/while loop at
                 * the top level, not from the 'break;' statement.
                 */
                ret = 1;
                if (remaining <= 0) {
                        if (smallest_size != -1) {
                                snum = smallest_rover;
                                goto have_snum;
                        }
                        goto fail;
                }
                /* OK, here is the one we will use.  HEAD is
                 * non-NULL and we hold its lock.
                 */
                snum = rover;
        } else {
have_snum:
                head = &hashinfo->bhash[inet_bhashfn(net, snum,
                                hashinfo->bhash_size)];
                spin_lock(&head->lock);
                inet_bind_bucket_for_each(tb, node, &head->chain)
                        if (net_eq(ib_net(tb), net) && tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (!hlist_empty(&tb->owners)) {
                if (sk->sk_reuse == SK_FORCE_REUSE)
                        goto success;

                if (tb->fastreuse > 0 &&
                    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
                    smallest_size == -1) {
                        goto success;
                } else {
                        ret = 1;
                        if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
                                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
                                    smallest_size != -1 && --attempts >= 0) {
                                        spin_unlock(&head->lock);
                                        goto again;
                                }

                                goto fail_unlock;
                        }
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
                                        net, head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
        } else if (tb->fastreuse &&
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;
success:
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, snum);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;

        /*
         * True wake-one mechanism for incoming connections: only
         * one process gets woken up, not the 'whole herd'.
         * Since we do not 'race & poll' for established sockets
         * anymore, the common case will execute the loop only once.
         *
         * Subtle issue: the waiter added by "add_wait_queue_exclusive()"
         * goes after any current non-exclusive waiters, and we know that
         * it will always _stay_ after any new non-exclusive waiters
         * because all non-exclusive waiters are added at the
         * beginning of the wait-queue. As such, it's ok to "drop"
         * our exclusiveness temporarily when we get woken up without
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *newsk;
        int error;

        lock_sock(sk);

        /* We need to make sure that this socket is listening,
         * and that it has something pending.
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out_err;

        /* Find an already established connection */
        if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

                /* If this is a non-blocking socket don't sleep */
                error = -EAGAIN;
                if (!timeo)
                        goto out_err;

                error = inet_csk_wait_for_connect(sk, timeo);
                if (error)
                        goto out_err;
        }

        newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
        WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
        release_sock(sk);
        return newsk;
out_err:
        newsk = NULL;
        *err = error;
        goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer, maintaining a list of expiry
 * jiffies, as an optimization.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(unsigned long),
                               void (*delack_handler)(unsigned long),
                               void (*keepalive_handler)(unsigned long))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
                        (unsigned long)sk);
        setup_timer(&icsk->icsk_delack_timer, delack_handler,
                        (unsigned long)sk);
        setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

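/*
 * Build the IPv4 route used to answer a connection request (e.g. for
 * the SYN-ACK), honouring any source-routing option carried by the
 * request.  Returns NULL and bumps OUTNOROUTES on failure.
 */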
struct dst_entry *inet_csk_route_req(struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
{
        struct rtable *rt;
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct ip_options_rcu *opt = inet_rsk(req)->opt;
        struct net *net = sock_net(sk);
        int flags = inet_sk_flowi_flags(sk);

        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol,
                           flags,
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                           ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

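/*
 * Same as inet_csk_route_req(), but for the newly created child socket:
 * the flow is stored in the child's inet cork and the child's IP
 * options are read under RCU.
 */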
struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt;
        struct net *net = sock_net(sk);
        struct flowi4 *fl4;
        struct rtable *rt;

        fl4 = &newinet->cork.fl.u.ip4;

        rcu_read_lock();
        opt = rcu_dereference(newinet->inet_opt);
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
                           ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        rcu_read_unlock();
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        rcu_read_unlock();
        IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

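/*
 * Hash a (remote address, remote port) pair into the listener's SYN
 * queue; synq_hsize is always a power of two.
 */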
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
                                 const u32 rnd, const u32 synq_hsize)
{
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

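/*
 * Look up a pending request_sock in the listener's SYN queue by the
 * remote port/address and local address; on success *prevp is set to
 * the slot pointing at the request so the caller can unlink it.
 */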
struct request_sock *inet_csk_search_req(const struct sock *sk,
                                         struct request_sock ***prevp,
                                         const __be16 rport, const __be32 raddr,
                                         const __be32 laddr)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        struct request_sock *req, **prev;

        for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                                    lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);

                if (ireq->rmt_port == rport &&
                    ireq->rmt_addr == raddr &&
                    ireq->loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
                        WARN_ON(req->sk);
                        *prevp = prev;
                        break;
                }
        }

        return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

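/*
 * Insert a new request_sock into the listener's SYN queue and update
 * the queue accounting (arming the SYN-ACK timer when the queue was
 * previously empty).
 */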
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);

        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
        inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;


/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                                  const int max_retries,
                                  const u8 rskq_defer_accept,
                                  int *expire, int *resend)
{
        if (!rskq_defer_accept) {
                *expire = req->retrans >= thresh;
                *resend = 1;
                return;
        }
        *expire = req->retrans >= thresh &&
                  (!inet_rsk(req)->acked || req->retrans >= max_retries);
        /*
         * Do not resend while waiting for data after ACK;
         * start resending at the end of the deferring period to give
         * a last chance for data or an ACK to create an established socket.
         */
        *resend = !inet_rsk(req)->acked ||
                  req->retrans >= rskq_defer_accept - 1;
}

void inet_csk_reqsk_queue_prune(struct sock *parent,
                                const unsigned long interval,
                                const unsigned long timeout,
                                const unsigned long max_rto)
{
        struct inet_connection_sock *icsk = inet_csk(parent);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct listen_sock *lopt = queue->listen_opt;
        int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
        int thresh = max_retries;
        unsigned long now = jiffies;
        struct request_sock **reqp, *req;
        int i, budget;

        if (lopt == NULL || lopt->qlen == 0)
                return;

        /* Normally all the openreqs are young and become mature
         * (i.e. converted to an established socket) by the first timeout.
         * If the SYN-ACK was not acknowledged for 1 second, it means
         * one of the following: the SYN-ACK was lost, the ACK was lost,
         * the RTT is high, or nobody planned to ack (i.e. a synflood).
         * When the server is somewhat loaded, the queue is populated
         * with old open requests, reducing its effective size.
         * When the server is heavily loaded, the queue size drops to
         * zero after several minutes of work. That is not a synflood,
         * it is normal operation. The solution is to prune entries that
         * are too old, overriding the normal timeout, when the situation
         * becomes dangerous.
         *
         * Essentially, we reserve half of the room for young
         * embryos and abort old ones without pity, if the old
         * ones are about to clog our table.
         */
        if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
                int young = (lopt->qlen_young << 1);

                while (thresh > 2) {
                        if (lopt->qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }

        if (queue->rskq_defer_accept)
                max_retries = queue->rskq_defer_accept;

        budget = 2 * (lopt->nr_table_entries / (timeout / interval));
        i = lopt->clock_hand;

        do {
                reqp = &lopt->syn_table[i];
                while ((req = *reqp) != NULL) {
                        if (time_after_eq(now, req->expires)) {
                                int expire = 0, resend = 0;

                                syn_ack_recalc(req, thresh, max_retries,
                                               queue->rskq_defer_accept,
                                               &expire, &resend);
                                req->rsk_ops->syn_ack_timeout(parent, req);
                                if (!expire &&
                                    (!resend ||
                                     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
                                     inet_rsk(req)->acked)) {
                                        unsigned long timeo;

                                        if (req->retrans++ == 0)
                                                lopt->qlen_young--;
                                        timeo = min((timeout << req->retrans), max_rto);
                                        req->expires = now + timeo;
                                        reqp = &req->dl_next;
                                        continue;
                                }

                                /* Drop this request */
                                inet_csk_reqsk_queue_unlink(parent, req, reqp);
                                reqsk_queue_removed(queue, req);
                                reqsk_free(req);
                                continue;
                        }
                        reqp = &req->dl_next;
                }

                i = (i + 1) & (lopt->nr_table_entries - 1);

        } while (--budget > 0);

        lopt->clock_hand = i;

        if (lopt->qlen)
                inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

/**
 *      inet_csk_clone_lock - clone an inet socket, and lock its clone
 *      @sk: the socket to clone
 *      @req: request_sock
 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *      Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority)
{
        struct sock *newsk = sk_clone_lock(sk, priority);

        if (newsk != NULL) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);

                newsk->sk_state = TCP_SYN_RECV;
                newicsk->icsk_bind_hash = NULL;

                inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
                inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
                inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
                newsk->sk_write_space = sk_stream_write_space;

                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;

                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

                security_inet_csk_clone(newsk, req);
        }
        return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
        WARN_ON(sk->sk_state != TCP_CLOSE);
        WARN_ON(!sock_flag(sk, SOCK_DEAD));

        /* It cannot be in the hash table! */
        WARN_ON(!sk_unhashed(sk));

        /* If inet_sk(sk)->inet_num is non-zero, it must be bound */
        WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

        sk->sk_prot->destroy(sk);

        sk_stream_kill_queues(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        percpu_counter_dec(sk->sk_prot->orphan_count);
        sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

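/*
 * Move a socket into the listening state: allocate the accept/SYN
 * queues, mark it TCP_LISTEN and (re)validate its local port.
 * Returns 0 on success or a negative error code.
 */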
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

        if (rc != 0)
                return rc;

        sk->sk_max_ack_backlog = 0;
        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);

        /* There is a race window here: we announce ourselves as listening,
         * but this transition has not yet been validated by get_port().
         * It is OK, because this socket enters the hash table only
         * after validation is complete.
         */
        sk->sk_state = TCP_LISTEN;
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);

                sk_dst_reset(sk);
                sk->sk_prot->hash(sk);

                return 0;
        }

        sk->sk_state = TCP_CLOSE;
        __reqsk_queue_destroy(&icsk->icsk_accept_queue);
        return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *      This routine closes sockets which have been at least partially
 *      opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock *acc_req;
        struct request_sock *req;

        inet_csk_delete_keepalive_timer(sk);

        /* make all the listen_opt local to us */
        acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

        /* Following the specs, it would be better either to send a FIN
         * (and enter FIN-WAIT-1, which is a normal close)
         * or to send an active reset (abort).
         * Certainly, this is pretty dangerous during a synflood, but that
         * is a bad justification for our negligence 8)
         * To be honest, we are not able to implement either
         * of the variants now.                 --ANK
         */
        reqsk_queue_destroy(&icsk->icsk_accept_queue);

        while ((req = acc_req) != NULL) {
                struct sock *child = req->sk;

                acc_req = req->dl_next;

                local_bh_disable();
                bh_lock_sock(child);
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);

                sk->sk_prot->disconnect(child, O_NONBLOCK);

                sock_orphan(child);

                percpu_counter_inc(sk->sk_prot->orphan_count);

                inet_csk_destroy_sock(child);

                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);

                sk_acceptq_removed(sk);
                __reqsk_free(req);
        }
        WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

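/*
 * Fill @uaddr (a struct sockaddr_in) with the peer's address and port.
 */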
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        const struct inet_sock *inet = inet_sk(sk);

        sin->sin_family         = AF_INET;
        sin->sin_addr.s_addr    = inet->inet_daddr;
        sin->sin_port           = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
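/*
 * 32-bit compat {get,set}sockopt entry points: use the address family's
 * compat handlers when they exist, otherwise fall back to the regular
 * handlers.
 */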
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_getsockopt != NULL)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, unsigned int optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_setsockopt != NULL)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

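/*
 * Recreate the cached route for a connected socket from the addresses
 * and ports stored in its inet_sock, honouring any source-routing
 * option.  Returns the new route, or NULL if no route could be found.
 */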
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;
        struct flowi4 *fl4;
        struct rtable *rt;

        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        fl4 = &fl->u.ip4;
        rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
                                   inet->inet_saddr, inet->inet_dport,
                                   inet->inet_sport, sk->sk_protocol,
                                   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
        if (IS_ERR(rt))
                rt = NULL;
        if (rt)
                sk_setup_caps(sk, &rt->dst);
        rcu_read_unlock();

        return &rt->dst;
}

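/*
 * Propagate a new path MTU to the socket's cached route, rebuilding the
 * route first if the cached one has become obsolete, and re-checking it
 * afterwards in case the PMTU update invalidated it.
 */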
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);
        struct inet_sock *inet = inet_sk(sk);

        if (!dst) {
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
                if (!dst)
                        goto out;
        }
        dst->ops->update_pmtu(dst, sk, NULL, mtu);

        dst = __sk_dst_check(sk, 0);
        if (!dst)
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
        return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);