linux/include/net/tcp.h
   1/*
   2 * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3 *              operating system.  INET is implemented using the  BSD Socket
   4 *              interface as the means of communication with the user level.
   5 *
   6 *              Definitions for the TCP module.
   7 *
   8 * Version:     @(#)tcp.h       1.0.5   05/23/93
   9 *
  10 * Authors:     Ross Biro
  11 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *              This program is free software; you can redistribute it and/or
  14 *              modify it under the terms of the GNU General Public License
  15 *              as published by the Free Software Foundation; either version
  16 *              2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/dmaengine.h>
  31#include <linux/crypto.h>
  32#include <linux/cryptohash.h>
  33#include <linux/kref.h>
  34
  35#include <net/inet_connection_sock.h>
  36#include <net/inet_timewait_sock.h>
  37#include <net/inet_hashtables.h>
  38#include <net/checksum.h>
  39#include <net/request_sock.h>
  40#include <net/sock.h>
  41#include <net/snmp.h>
  42#include <net/ip.h>
  43#include <net/tcp_states.h>
  44#include <net/inet_ecn.h>
  45#include <net/dst.h>
  46
  47#include <linux/seq_file.h>
  48#include <linux/memcontrol.h>
  49
  50extern struct inet_hashinfo tcp_hashinfo;
  51
  52extern struct percpu_counter tcp_orphan_count;
  53void tcp_time_wait(struct sock *sk, int state, int timeo);
  54
  55#define MAX_TCP_HEADER  (128 + MAX_HEADER)
  56#define MAX_TCP_OPTION_SPACE 40
  57
  58/* 
  59 * Never offer a window over 32767 without using window scaling. Some
  60 * poor stacks do signed 16bit maths! 
  61 */
  62#define MAX_TCP_WINDOW          32767U
  63
  64/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  65#define TCP_MIN_MSS             88U
  66
  67/* The least MTU to use for probing */
  68#define TCP_BASE_MSS            512
  69
  70/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  71#define TCP_FASTRETRANS_THRESH 3
  72
  73/* Maximal reordering. */
  74#define TCP_MAX_REORDERING      127
  75
  76/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  77#define TCP_MAX_QUICKACKS       16U
  78
  79/* urg_data states */
  80#define TCP_URG_VALID   0x0100
  81#define TCP_URG_NOTYET  0x0200
  82#define TCP_URG_READ    0x0400
  83
  84#define TCP_RETR1       3       /*
  85                                 * This is how many retries it does before it
  86                                 * tries to figure out if the gateway is
  87                                 * down. Minimal RFC value is 3; it corresponds
  88                                 * to ~3sec-8min depending on RTO.
  89                                 */
  90
  91#define TCP_RETR2       15      /*
  92                                 * This should take at least
  93                                 * 90 minutes to time out.
  94                                 * RFC1122 says that the limit is 100 sec.
  95                                 * 15 is ~13-30min depending on RTO.
  96                                 */
  97
  98#define TCP_SYN_RETRIES  6      /* This is how many retries are done
  99                                 * when actively opening a connection.
 100                                 * RFC1122 says the minimum retry MUST
 101                                 * be at least 180 secs.  Nevertheless
 102                                 * this value corresponds to
 103                                 * 63 secs of retransmission with the
 104                                 * current initial RTO.
 105                                 */
 106
 107#define TCP_SYNACK_RETRIES 5    /* This is how many retries are done
 108                                 * when passively opening a connection.
 109                                 * This corresponds to 31 secs of
 110                                 * retransmission with the current
 111                                 * initial RTO.
 112                                 */
 113
 114#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 115                                  * state, about 60 seconds     */
 116#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
 117                                 /* BSD style FIN_WAIT2 deadlock breaker.
 118                                  * It used to be 3min, new value is 60sec,
 119                                  * to combine FIN-WAIT-2 timeout with
 120                                  * TIME-WAIT timer.
 121                                  */
 122
 123#define TCP_DELACK_MAX  ((unsigned)(HZ/5))      /* maximal time to delay before sending an ACK */
 124#if HZ >= 100
 125#define TCP_DELACK_MIN  ((unsigned)(HZ/25))     /* minimal time to delay before sending an ACK */
 126#define TCP_ATO_MIN     ((unsigned)(HZ/25))
 127#else
 128#define TCP_DELACK_MIN  4U
 129#define TCP_ATO_MIN     4U
 130#endif
 131#define TCP_RTO_MAX     ((unsigned)(120*HZ))
 132#define TCP_RTO_MIN     ((unsigned)(HZ/5))
 133#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))     /* RFC6298 2.1 initial RTO value        */
 134#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
 135                                                 * used as a fallback RTO for the
 136                                                 * initial data transmission if no
 137                                                 * valid RTT sample has been acquired,
 138                                                 * most likely due to retrans in 3WHS.
 139                                                 */
 140
 141#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 142                                                         * for local resources.
 143                                                         */
 144
 145#define TCP_KEEPALIVE_TIME      (120*60*HZ)     /* two hours */
 146#define TCP_KEEPALIVE_PROBES    9               /* Max of 9 keepalive probes    */
 147#define TCP_KEEPALIVE_INTVL     (75*HZ)
 148
 149#define MAX_TCP_KEEPIDLE        32767
 150#define MAX_TCP_KEEPINTVL       32767
 151#define MAX_TCP_KEEPCNT         127
 152#define MAX_TCP_SYNCNT          127
 153
 154#define TCP_SYNQ_INTERVAL       (HZ/5)  /* Period of SYNACK timer */
 155
 156#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
 157#define TCP_PAWS_MSL    60              /* Per-host timestamps are invalidated
 158                                         * after this time. It should be equal
 159                                         * (or greater than) TCP_TIMEWAIT_LEN
 160                                         * to provide reliability equal to one
 161                                         * provided by timewait state.
 162                                         */
 163#define TCP_PAWS_WINDOW 1               /* Replay window for per-host
 164                                         * timestamps. It must be less than
 165                                         * minimal timewait lifetime.
 166                                         */
 167/*
 168 *      TCP option
 169 */
 170 
 171#define TCPOPT_NOP              1       /* Padding */
 172#define TCPOPT_EOL              0       /* End of options */
 173#define TCPOPT_MSS              2       /* Segment size negotiating */
 174#define TCPOPT_WINDOW           3       /* Window scaling */
 175#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 176#define TCPOPT_SACK             5       /* SACK Block */
 177#define TCPOPT_TIMESTAMP        8       /* Better RTT estimations/PAWS */
 178#define TCPOPT_MD5SIG           19      /* MD5 Signature (RFC2385) */
 179#define TCPOPT_EXP              254     /* Experimental */
 180/* Magic number to be after the option value for sharing TCP
 181 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 182 */
 183#define TCPOPT_FASTOPEN_MAGIC   0xF989
 184
 185/*
 186 *     TCP option lengths
 187 */
 188
 189#define TCPOLEN_MSS            4
 190#define TCPOLEN_WINDOW         3
 191#define TCPOLEN_SACK_PERM      2
 192#define TCPOLEN_TIMESTAMP      10
 193#define TCPOLEN_MD5SIG         18
 194#define TCPOLEN_EXP_FASTOPEN_BASE  4
 195
 196/* But this is what stacks really send out. */
 197#define TCPOLEN_TSTAMP_ALIGNED          12
 198#define TCPOLEN_WSCALE_ALIGNED          4
 199#define TCPOLEN_SACKPERM_ALIGNED        4
 200#define TCPOLEN_SACK_BASE               2
 201#define TCPOLEN_SACK_BASE_ALIGNED       4
 202#define TCPOLEN_SACK_PERBLOCK           8
 203#define TCPOLEN_MD5SIG_ALIGNED          20
 204#define TCPOLEN_MSS_ALIGNED             4
 205
 206/* Flags in tp->nonagle */
 207#define TCP_NAGLE_OFF           1       /* Nagle's algo is disabled */
 208#define TCP_NAGLE_CORK          2       /* Socket is corked         */
 209#define TCP_NAGLE_PUSH          4       /* Cork is overridden for already queued data */
 210
 211/* TCP thin-stream limits */
 212#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 213
 214/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
 215#define TCP_INIT_CWND           10
 216
 217/* Bit Flags for sysctl_tcp_fastopen */
 218#define TFO_CLIENT_ENABLE       1
 219#define TFO_SERVER_ENABLE       2
 220#define TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */
 221
 222/* Process SYN data but skip cookie validation */
 223#define TFO_SERVER_COOKIE_NOT_CHKED     0x100
 224/* Accept SYN data w/o any cookie option */
 225#define TFO_SERVER_COOKIE_NOT_REQD      0x200
 226
 227/* Force enable TFO on all listeners, i.e., not requiring the
 228 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 229 */
 230#define TFO_SERVER_WO_SOCKOPT1  0x400
 231#define TFO_SERVER_WO_SOCKOPT2  0x800
 232/* Always create TFO child sockets on a TFO listener even when
 233 * cookie/data not present. (For testing purpose!)
 234 */
 235#define TFO_SERVER_ALWAYS       0x1000
 236
 237extern struct inet_timewait_death_row tcp_death_row;
 238
 239/* sysctl variables for tcp */
 240extern int sysctl_tcp_timestamps;
 241extern int sysctl_tcp_window_scaling;
 242extern int sysctl_tcp_sack;
 243extern int sysctl_tcp_fin_timeout;
 244extern int sysctl_tcp_keepalive_time;
 245extern int sysctl_tcp_keepalive_probes;
 246extern int sysctl_tcp_keepalive_intvl;
 247extern int sysctl_tcp_syn_retries;
 248extern int sysctl_tcp_synack_retries;
 249extern int sysctl_tcp_retries1;
 250extern int sysctl_tcp_retries2;
 251extern int sysctl_tcp_orphan_retries;
 252extern int sysctl_tcp_syncookies;
 253extern int sysctl_tcp_fastopen;
 254extern int sysctl_tcp_retrans_collapse;
 255extern int sysctl_tcp_stdurg;
 256extern int sysctl_tcp_rfc1337;
 257extern int sysctl_tcp_abort_on_overflow;
 258extern int sysctl_tcp_max_orphans;
 259extern int sysctl_tcp_fack;
 260extern int sysctl_tcp_reordering;
 261extern int sysctl_tcp_dsack;
 262extern long sysctl_tcp_mem[3];
 263extern int sysctl_tcp_wmem[3];
 264extern int sysctl_tcp_rmem[3];
 265extern int sysctl_tcp_app_win;
 266extern int sysctl_tcp_adv_win_scale;
 267extern int sysctl_tcp_tw_reuse;
 268extern int sysctl_tcp_frto;
 269extern int sysctl_tcp_low_latency;
 270extern int sysctl_tcp_dma_copybreak;
 271extern int sysctl_tcp_nometrics_save;
 272extern int sysctl_tcp_moderate_rcvbuf;
 273extern int sysctl_tcp_tso_win_divisor;
 274extern int sysctl_tcp_mtu_probing;
 275extern int sysctl_tcp_base_mss;
 276extern int sysctl_tcp_workaround_signed_windows;
 277extern int sysctl_tcp_slow_start_after_idle;
 278extern int sysctl_tcp_thin_linear_timeouts;
 279extern int sysctl_tcp_thin_dupack;
 280extern int sysctl_tcp_early_retrans;
 281extern int sysctl_tcp_limit_output_bytes;
 282extern int sysctl_tcp_challenge_ack_limit;
 283extern unsigned int sysctl_tcp_notsent_lowat;
 284extern int sysctl_tcp_min_tso_segs;
 285extern int sysctl_tcp_autocorking;
 286
 287extern atomic_long_t tcp_memory_allocated;
 288extern struct percpu_counter tcp_sockets_allocated;
 289extern int tcp_memory_pressure;
 290
 291/*
 292 * The next routines deal with comparing 32 bit unsigned ints
 293 * and worry about wraparound (automatic with unsigned arithmetic).
 294 */
 295
 296static inline bool before(__u32 seq1, __u32 seq2)
 297{
 298        return (__s32)(seq1-seq2) < 0;
 299}
 300#define after(seq2, seq1)       before(seq1, seq2)
 301
 302/* is s2<=s1<=s3 ? */
 303static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 304{
 305        return seq3 - seq2 >= seq1 - seq2;
 306}
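/* A minimal illustration (not part of the original header) of how the
 * wraparound-safe comparisons above behave near the end of the 32-bit
 * sequence space; the sample values are hypothetical.
 *
 *	__u32 una = 0xfffffff0;		// snd_una just before wraparound
 *	__u32 nxt = 0x00000010;		// snd_nxt after wraparound
 *
 *	before(una, nxt);		// true: (__s32)(una - nxt) < 0
 *	after(nxt, una);		// true, by definition of after()
 *	between(0xfffffffa, una, nxt);	// true: una <= seq <= nxt (mod 2^32)
 */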
 307
 308static inline bool tcp_out_of_memory(struct sock *sk)
 309{
 310        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 311            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 312                return true;
 313        return false;
 314}
 315
 316static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 317{
 318        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 319        int orphans = percpu_counter_read_positive(ocp);
 320
 321        if (orphans << shift > sysctl_tcp_max_orphans) {
 322                orphans = percpu_counter_sum_positive(ocp);
 323                if (orphans << shift > sysctl_tcp_max_orphans)
 324                        return true;
 325        }
 326        return false;
 327}
 328
 329bool tcp_check_oom(struct sock *sk, int shift);
 330
 331/* syncookies: remember time of last synqueue overflow */
 332static inline void tcp_synq_overflow(struct sock *sk)
 333{
 334        tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
 335}
 336
 337/* syncookies: no recent synqueue overflow on this listening socket? */
 338static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 339{
 340        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 341        return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
 342}
 343
 344extern struct proto tcp_prot;
 345
 346#define TCP_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 347#define TCP_INC_STATS_BH(net, field)    SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 348#define TCP_DEC_STATS(net, field)       SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 349#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 350#define TCP_ADD_STATS(net, field, val)  SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 351
 352void tcp_tasklet_init(void);
 353
 354void tcp_v4_err(struct sk_buff *skb, u32);
 355
 356void tcp_shutdown(struct sock *sk, int how);
 357
 358void tcp_v4_early_demux(struct sk_buff *skb);
 359int tcp_v4_rcv(struct sk_buff *skb);
 360
 361int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 362int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 363                size_t size);
 364int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 365                 int flags);
 366void tcp_release_cb(struct sock *sk);
 367void tcp_wfree(struct sk_buff *skb);
 368void tcp_write_timer_handler(struct sock *sk);
 369void tcp_delack_timer_handler(struct sock *sk);
 370int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 371int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 372                          const struct tcphdr *th, unsigned int len);
 373void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 374                         const struct tcphdr *th, unsigned int len);
 375void tcp_rcv_space_adjust(struct sock *sk);
 376void tcp_cleanup_rbuf(struct sock *sk, int copied);
 377int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 378void tcp_twsk_destructor(struct sock *sk);
 379ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 380                        struct pipe_inode_info *pipe, size_t len,
 381                        unsigned int flags);
 382
 383static inline void tcp_dec_quickack_mode(struct sock *sk,
 384                                         const unsigned int pkts)
 385{
 386        struct inet_connection_sock *icsk = inet_csk(sk);
 387
 388        if (icsk->icsk_ack.quick) {
 389                if (pkts >= icsk->icsk_ack.quick) {
 390                        icsk->icsk_ack.quick = 0;
 391                        /* Leaving quickack mode we deflate ATO. */
 392                        icsk->icsk_ack.ato   = TCP_ATO_MIN;
 393                } else
 394                        icsk->icsk_ack.quick -= pkts;
 395        }
 396}
 397
 398#define TCP_ECN_OK              1
 399#define TCP_ECN_QUEUE_CWR       2
 400#define TCP_ECN_DEMAND_CWR      4
 401#define TCP_ECN_SEEN            8
 402
 403enum tcp_tw_status {
 404        TCP_TW_SUCCESS = 0,
 405        TCP_TW_RST = 1,
 406        TCP_TW_ACK = 2,
 407        TCP_TW_SYN = 3
 408};
 409
 410
 411enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 412                                              struct sk_buff *skb,
 413                                              const struct tcphdr *th);
 414struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 415                           struct request_sock *req, struct request_sock **prev,
 416                           bool fastopen);
 417int tcp_child_process(struct sock *parent, struct sock *child,
 418                      struct sk_buff *skb);
 419void tcp_enter_loss(struct sock *sk, int how);
 420void tcp_clear_retrans(struct tcp_sock *tp);
 421void tcp_update_metrics(struct sock *sk);
 422void tcp_init_metrics(struct sock *sk);
 423void tcp_metrics_init(void);
 424bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 425                        bool paws_check);
 426bool tcp_remember_stamp(struct sock *sk);
 427bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
 428void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 429void tcp_disable_fack(struct tcp_sock *tp);
 430void tcp_close(struct sock *sk, long timeout);
 431void tcp_init_sock(struct sock *sk);
 432unsigned int tcp_poll(struct file *file, struct socket *sock,
 433                      struct poll_table_struct *wait);
 434int tcp_getsockopt(struct sock *sk, int level, int optname,
 435                   char __user *optval, int __user *optlen);
 436int tcp_setsockopt(struct sock *sk, int level, int optname,
 437                   char __user *optval, unsigned int optlen);
 438int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 439                          char __user *optval, int __user *optlen);
 440int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 441                          char __user *optval, unsigned int optlen);
 442void tcp_set_keepalive(struct sock *sk, int val);
 443void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
 444int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 445                size_t len, int nonblock, int flags, int *addr_len);
 446void tcp_parse_options(const struct sk_buff *skb,
 447                       struct tcp_options_received *opt_rx,
 448                       int estab, struct tcp_fastopen_cookie *foc);
 449const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 450
 451/*
 452 *      TCP v4 functions exported for the inet6 API
 453 */
 454
 455void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 456int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 457struct sock *tcp_create_openreq_child(struct sock *sk,
 458                                      struct request_sock *req,
 459                                      struct sk_buff *skb);
 460struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 461                                  struct request_sock *req,
 462                                  struct dst_entry *dst);
 463int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 464int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 465int tcp_connect(struct sock *sk);
 466struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 467                                struct request_sock *req,
 468                                struct tcp_fastopen_cookie *foc);
 469int tcp_disconnect(struct sock *sk, int flags);
 470
 471void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 472int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 473void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 474
 475/* From syncookies.c */
 476int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 477                      u32 cookie);
 478struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 479                             struct ip_options *opt);
 480#ifdef CONFIG_SYN_COOKIES
 481#include <linux/ktime.h>
 482
 483/* Syncookies use a monotonic timer which increments every 60 seconds.
 484 * This counter is used both as a hash input and partially encoded into
 485 * the cookie value.  A cookie is only validated further if the delta
 486 * between the current counter value and the encoded one is less than this,
 487 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
 488 * the counter advances immediately after a cookie is generated).
 489 */
 490#define MAX_SYNCOOKIE_AGE 2
 491
 492static inline u32 tcp_cookie_time(void)
 493{
 494        u64 val = get_jiffies_64();
 495
 496        do_div(val, 60 * HZ);
 497        return val;
 498}
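/* Worked example (restating the comment above with hypothetical timing):
 * tcp_cookie_time() advances once every 60*HZ jiffies, i.e. once a minute.
 * With MAX_SYNCOOKIE_AGE == 2, a cookie stamped with counter value N is
 * accepted while the current counter is N or N+1, so it stays valid for at
 * most 2*60 = 120 seconds, and for as little as just over 60 seconds if it
 * was generated right before the counter ticked.
 */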
 499
 500u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 501                              u16 *mssp);
 502__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
 503#else
 504static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 505                                            struct sk_buff *skb,
 506                                            __u16 *mss)
 507{
 508        return 0;
 509}
 510#endif
 511
 512__u32 cookie_init_timestamp(struct request_sock *req);
 513bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
 514                            bool *ecn_ok);
 515
 516/* From net/ipv6/syncookies.c */
 517int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 518                      u32 cookie);
 519struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 520#ifdef CONFIG_SYN_COOKIES
 521u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 522                              const struct tcphdr *th, u16 *mssp);
 523__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
 524                              __u16 *mss);
 525#else
 526static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 527                                            struct sk_buff *skb,
 528                                            __u16 *mss)
 529{
 530        return 0;
 531}
 532#endif
 533/* tcp_output.c */
 534
 535void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 536                               int nonagle);
 537bool tcp_may_send_now(struct sock *sk);
 538int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
 539int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 540void tcp_retransmit_timer(struct sock *sk);
 541void tcp_xmit_retransmit_queue(struct sock *);
 542void tcp_simple_retransmit(struct sock *);
 543int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 544int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
 545
 546void tcp_send_probe0(struct sock *);
 547void tcp_send_partial(struct sock *);
 548int tcp_write_wakeup(struct sock *);
 549void tcp_send_fin(struct sock *sk);
 550void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 551int tcp_send_synack(struct sock *);
 552bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
 553                          const char *proto);
 554void tcp_push_one(struct sock *, unsigned int mss_now);
 555void tcp_send_ack(struct sock *sk);
 556void tcp_send_delayed_ack(struct sock *sk);
 557void tcp_send_loss_probe(struct sock *sk);
 558bool tcp_schedule_loss_probe(struct sock *sk);
 559
 560/* tcp_input.c */
 561void tcp_cwnd_application_limited(struct sock *sk);
 562void tcp_resume_early_retransmit(struct sock *sk);
 563void tcp_rearm_rto(struct sock *sk);
 564void tcp_reset(struct sock *sk);
 565
 566/* tcp_timer.c */
 567void tcp_init_xmit_timers(struct sock *);
 568static inline void tcp_clear_xmit_timers(struct sock *sk)
 569{
 570        inet_csk_clear_xmit_timers(sk);
 571}
 572
 573unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 574unsigned int tcp_current_mss(struct sock *sk);
 575
 576/* Bound MSS / TSO packet size with half of the window */
 577static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 578{
 579        int cutoff;
 580
 581        /* When peer uses tiny windows, there is no use in packetizing
 582         * to sub-MSS pieces for the sake of SWS or making sure there
 583         * are enough packets in the pipe for fast recovery.
 584         *
 585         * On the other hand, for extremely large MSS devices, handling
 586         * smaller than MSS windows in this way does make sense.
 587         */
 588        if (tp->max_window >= 512)
 589                cutoff = (tp->max_window >> 1);
 590        else
 591                cutoff = tp->max_window;
 592
 593        if (cutoff && pktsize > cutoff)
 594                return max_t(int, cutoff, 68U - tp->tcp_header_len);
 595        else
 596                return pktsize;
 597}
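/* Worked example (hypothetical values): with tp->max_window == 65535 the
 * cutoff is 32767, so a 48000 byte TSO packet is bounded to 32767 bytes.
 * With a tiny peer window such as tp->max_window == 300, the cutoff is the
 * whole window and a 1460 byte MSS packet is bounded to 300 bytes (the
 * 68 - tcp_header_len floor does not apply here).
 */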
 598
 599/* tcp.c */
 600void tcp_get_info(const struct sock *, struct tcp_info *);
 601
 602/* Read 'sendfile()'-style from a TCP socket */
 603typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 604                                unsigned int, size_t);
 605int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 606                  sk_read_actor_t recv_actor);
 607
 608void tcp_initialize_rcv_mss(struct sock *sk);
 609
 610int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 611int tcp_mss_to_mtu(struct sock *sk, int mss);
 612void tcp_mtup_init(struct sock *sk);
 613void tcp_init_buffer_space(struct sock *sk);
 614
 615static inline void tcp_bound_rto(const struct sock *sk)
 616{
 617        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 618                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 619}
 620
 621static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 622{
 623        return (tp->srtt >> 3) + tp->rttvar;
 624}
 625
 626static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 627{
 628        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 629                               ntohl(TCP_FLAG_ACK) |
 630                               snd_wnd);
 631}
 632
 633static inline void tcp_fast_path_on(struct tcp_sock *tp)
 634{
 635        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 636}
 637
 638static inline void tcp_fast_path_check(struct sock *sk)
 639{
 640        struct tcp_sock *tp = tcp_sk(sk);
 641
 642        if (skb_queue_empty(&tp->out_of_order_queue) &&
 643            tp->rcv_wnd &&
 644            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 645            !tp->urg_data)
 646                tcp_fast_path_on(tp);
 647}
 648
 649/* Compute the actual rto_min value */
 650static inline u32 tcp_rto_min(struct sock *sk)
 651{
 652        const struct dst_entry *dst = __sk_dst_get(sk);
 653        u32 rto_min = TCP_RTO_MIN;
 654
 655        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 656                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 657        return rto_min;
 658}
 659
 660/* Compute the actual receive window we are currently advertising.
 661 * Rcv_nxt can be after the window if our peer pushes more data
 662 * than the offered window.
 663 */
 664static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 665{
 666        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 667
 668        if (win < 0)
 669                win = 0;
 670        return (u32) win;
 671}
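/* Worked example (hypothetical values): if rcv_wup == 1000, rcv_wnd == 500
 * and rcv_nxt == 1300, the remaining advertised window is
 * 1000 + 500 - 1300 = 200 bytes.  Had the peer pushed beyond the offer,
 * e.g. rcv_nxt == 1600, the result would clamp to 0 rather than go negative.
 */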
 672
 673/* Choose a new window, without checks for shrinking, and without
 674 * scaling applied to the result.  The caller does these things
 675 * if necessary.  This is a "raw" window selection.
 676 */
 677u32 __tcp_select_window(struct sock *sk);
 678
 679void tcp_send_window_probe(struct sock *sk);
 680
 681/* TCP timestamps are only 32-bits, this causes a slight
 682 * complication on 64-bit systems since we store a snapshot
 683 * of jiffies in the buffer control blocks below.  We decided
 684 * to use only the low 32-bits of jiffies and hide the ugly
 685 * casts with the following macro.
 686 */
 687#define tcp_time_stamp          ((__u32)(jiffies))
 688
 689#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 690
 691#define TCPHDR_FIN 0x01
 692#define TCPHDR_SYN 0x02
 693#define TCPHDR_RST 0x04
 694#define TCPHDR_PSH 0x08
 695#define TCPHDR_ACK 0x10
 696#define TCPHDR_URG 0x20
 697#define TCPHDR_ECE 0x40
 698#define TCPHDR_CWR 0x80
 699
 700/* This is what the send packet queuing engine uses to pass
 701 * TCP per-packet control information to the transmission code.
 702 * We also store the host-order sequence numbers in here.
 703 * This is 44 bytes if IPV6 is enabled.
 704 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 705 */
 706struct tcp_skb_cb {
 707        union {
 708                struct inet_skb_parm    h4;
 709#if IS_ENABLED(CONFIG_IPV6)
 710                struct inet6_skb_parm   h6;
 711#endif
 712        } header;       /* For incoming frames          */
 713        __u32           seq;            /* Starting sequence number     */
 714        __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
 715        __u32           when;           /* used to compute rtt's        */
 716        __u8            tcp_flags;      /* TCP header flags. (tcp[13])  */
 717
 718        __u8            sacked;         /* State flags for SACK/FACK.   */
 719#define TCPCB_SACKED_ACKED      0x01    /* SKB ACK'd by a SACK block    */
 720#define TCPCB_SACKED_RETRANS    0x02    /* SKB retransmitted            */
 721#define TCPCB_LOST              0x04    /* SKB is lost                  */
 722#define TCPCB_TAGBITS           0x07    /* All tag bits                 */
 723#define TCPCB_EVER_RETRANS      0x80    /* Ever retransmitted frame     */
 724#define TCPCB_RETRANS           (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 725
 726        __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
 727        /* 1 byte hole */
 728        __u32           ack_seq;        /* Sequence number ACK'd        */
 729};
 730
 731#define TCP_SKB_CB(__skb)       ((struct tcp_skb_cb *)&((__skb)->cb[0]))
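/* Typical (illustrative) access pattern for the per-packet control block
 * defined above; "skb" here stands for any packet owned by the TCP layer:
 *
 *	TCP_SKB_CB(skb)->seq;				// first sequence number carried
 *	TCP_SKB_CB(skb)->end_seq;			// seq + SYN + FIN + payload length
 *	TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN;	// header flags, see TCPHDR_*
 */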
 732
 733/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 734 *
 735 * If we receive a SYN packet with these bits set, it means a network is
 736 * playing bad games with TOS bits. In order to avoid possible false congestion
 737 * notifications, we disable TCP ECN negotiation.
 738 */
 739static inline void
 740TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
 741                struct net *net)
 742{
 743        const struct tcphdr *th = tcp_hdr(skb);
 744
 745        if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
 746            INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
 747                inet_rsk(req)->ecn_ok = 1;
 748}
 749
 750/* Due to TSO, an SKB can be composed of multiple actual
 751 * packets.  To keep these tracked properly, we use this.
 752 */
 753static inline int tcp_skb_pcount(const struct sk_buff *skb)
 754{
 755        return skb_shinfo(skb)->gso_segs;
 756}
 757
 758/* This is valid iff tcp_skb_pcount() > 1. */
 759static inline int tcp_skb_mss(const struct sk_buff *skb)
 760{
 761        return skb_shinfo(skb)->gso_size;
 762}
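/* Illustrative relationship (hypothetical numbers): a TSO skb carrying
 * 14600 bytes of payload at a 1460 byte MSS has tcp_skb_pcount(skb) == 10
 * and tcp_skb_mss(skb) == 1460, i.e. the stack accounts for it as ten
 * MSS-sized packets even though it sits on the queue as a single skb.
 */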
 763
 764/* Events passed to congestion control interface */
 765enum tcp_ca_event {
 766        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
 767        CA_EVENT_CWND_RESTART,  /* congestion window restart */
 768        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
 769        CA_EVENT_LOSS,          /* loss timeout */
 770        CA_EVENT_FAST_ACK,      /* in sequence ack */
 771        CA_EVENT_SLOW_ACK,      /* other ack */
 772};
 773
 774/*
 775 * Interface for adding new TCP congestion control handlers
 776 */
 777#define TCP_CA_NAME_MAX 16
 778#define TCP_CA_MAX      128
 779#define TCP_CA_BUF_MAX  (TCP_CA_NAME_MAX*TCP_CA_MAX)
 780
 781#define TCP_CONG_NON_RESTRICTED 0x1
 782#define TCP_CONG_RTT_STAMP      0x2
 783
 784struct tcp_congestion_ops {
 785        struct list_head        list;
 786        unsigned long flags;
 787
 788        /* initialize private data (optional) */
 789        void (*init)(struct sock *sk);
 790        /* cleanup private data  (optional) */
 791        void (*release)(struct sock *sk);
 792
 793        /* return slow start threshold (required) */
 794        u32 (*ssthresh)(struct sock *sk);
 795        /* lower bound for congestion window (optional) */
 796        u32 (*min_cwnd)(const struct sock *sk);
 797        /* do new cwnd calculation (required) */
 798        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 799        /* call before changing ca_state (optional) */
 800        void (*set_state)(struct sock *sk, u8 new_state);
 801        /* call when cwnd event occurs (optional) */
 802        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 803        /* new value of cwnd after loss (optional) */
 804        u32  (*undo_cwnd)(struct sock *sk);
 805        /* hook for packet ack accounting (optional) */
 806        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
 807        /* get info for inet_diag (optional) */
 808        void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 809
 810        char            name[TCP_CA_NAME_MAX];
 811        struct module   *owner;
 812};
 813
 814int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 815void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 816
 817void tcp_init_congestion_control(struct sock *sk);
 818void tcp_cleanup_congestion_control(struct sock *sk);
 819int tcp_set_default_congestion_control(const char *name);
 820void tcp_get_default_congestion_control(char *name);
 821void tcp_get_available_congestion_control(char *buf, size_t len);
 822void tcp_get_allowed_congestion_control(char *buf, size_t len);
 823int tcp_set_allowed_congestion_control(char *allowed);
 824int tcp_set_congestion_control(struct sock *sk, const char *name);
 825int tcp_slow_start(struct tcp_sock *tp, u32 acked);
 826void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 827
 828extern struct tcp_congestion_ops tcp_init_congestion_ops;
 829u32 tcp_reno_ssthresh(struct sock *sk);
 830void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 831u32 tcp_reno_min_cwnd(const struct sock *sk);
 832extern struct tcp_congestion_ops tcp_reno;
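/* Minimal sketch of a congestion control module built on the ops interface
 * above.  Purely illustrative: "tcp_example" is a hypothetical name and the
 * hooks simply reuse the exported Reno helpers declared above.
 */
#if 0
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.min_cwnd	= tcp_reno_min_cwnd,	/* optional lower bound */
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");
#endif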
 833
 834static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
 835{
 836        struct inet_connection_sock *icsk = inet_csk(sk);
 837
 838        if (icsk->icsk_ca_ops->set_state)
 839                icsk->icsk_ca_ops->set_state(sk, ca_state);
 840        icsk->icsk_ca_state = ca_state;
 841}
 842
 843static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 844{
 845        const struct inet_connection_sock *icsk = inet_csk(sk);
 846
 847        if (icsk->icsk_ca_ops->cwnd_event)
 848                icsk->icsk_ca_ops->cwnd_event(sk, event);
 849}
 850
 851/* These functions determine how the current flow behaves with respect to SACK
 852 * handling. SACK is negotiated with the peer, and therefore it can vary
 853 * between different flows.
 854 *
 855 * tcp_is_sack - SACK enabled
 856 * tcp_is_reno - No SACK
 857 * tcp_is_fack - FACK enabled, implies SACK enabled
 858 */
 859static inline int tcp_is_sack(const struct tcp_sock *tp)
 860{
 861        return tp->rx_opt.sack_ok;
 862}
 863
 864static inline bool tcp_is_reno(const struct tcp_sock *tp)
 865{
 866        return !tcp_is_sack(tp);
 867}
 868
 869static inline bool tcp_is_fack(const struct tcp_sock *tp)
 870{
 871        return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 872}
 873
 874static inline void tcp_enable_fack(struct tcp_sock *tp)
 875{
 876        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 877}
 878
 879/* TCP early-retransmit (ER) is similar to but more conservative than
 880 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 881 */
 882static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
 883{
 884        tp->do_early_retrans = sysctl_tcp_early_retrans &&
 885                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
 886                sysctl_tcp_reordering == 3;
 887}
 888
 889static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
 890{
 891        tp->do_early_retrans = 0;
 892}
 893
 894static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 895{
 896        return tp->sacked_out + tp->lost_out;
 897}
 898
 899/* This determines how many packets are "in the network" to the best
 900 * of our knowledge.  In many cases it is conservative, but where
 901 * detailed information is available from the receiver (via SACK
 902 * blocks etc.) we can make more aggressive calculations.
 903 *
 904 * Use this for decisions involving congestion control, use just
 905 * tp->packets_out to determine if the send queue is empty or not.
 906 *
 907 * Read this equation as:
 908 *
 909 *      "Packets sent once on transmission queue" MINUS
 910 *      "Packets left network, but not honestly ACKed yet" PLUS
 911 *      "Packets fast retransmitted"
 912 */
 913static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 914{
 915        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 916}
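/* Worked example (hypothetical counters): with packets_out == 10,
 * sacked_out == 2, lost_out == 1 and retrans_out == 1,
 * tcp_left_out() == 3 and tcp_packets_in_flight() == 10 - 3 + 1 == 8.
 */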
 917
 918#define TCP_INFINITE_SSTHRESH   0x7fffffff
 919
 920static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 921{
 922        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
 923}
 924
 925static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
 926{
 927        return (TCPF_CA_CWR | TCPF_CA_Recovery) &
 928               (1 << inet_csk(sk)->icsk_ca_state);
 929}
 930
 931/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 932 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 933 * ssthresh.
 934 */
 935static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 936{
 937        const struct tcp_sock *tp = tcp_sk(sk);
 938
 939        if (tcp_in_cwnd_reduction(sk))
 940                return tp->snd_ssthresh;
 941        else
 942                return max(tp->snd_ssthresh,
 943                           ((tp->snd_cwnd >> 1) +
 944                            (tp->snd_cwnd >> 2)));
 945}
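/* Worked example (hypothetical values): outside of cwnd reduction, with
 * snd_cwnd == 40 and snd_ssthresh == 20 this returns
 * max(20, (40 >> 1) + (40 >> 2)) == max(20, 30) == 30, i.e. roughly
 * three quarters of the current cwnd.
 */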
 946
 947/* Use define here intentionally to get WARN_ON location shown at the caller */
 948#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
 949
 950void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 951__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 952
 953/* The maximum number of MSS of available cwnd for which TSO defers
 954 * sending if not using sysctl_tcp_tso_win_divisor.
 955 */
 956static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
 957{
 958        return 3;
 959}
 960
 961/* Slow start with delack produces a burst of 3 packets, so that
 962 * it is safe "de facto".  This will be the default - same as
 963 * the default reordering threshold - but if reordering increases,
 964 * we must be able to allow cwnd to burst at least this much in order
 965 * to not pull it back when holes are filled.
 966 */
 967static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 968{
 969        return tp->reordering;
 970}
 971
 972/* Returns end sequence number of the receiver's advertised window */
 973static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 974{
 975        return tp->snd_una + tp->snd_wnd;
 976}
 977bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 978
 979static inline void tcp_check_probe_timer(struct sock *sk)
 980{
 981        const struct tcp_sock *tp = tcp_sk(sk);
 982        const struct inet_connection_sock *icsk = inet_csk(sk);
 983
 984        if (!tp->packets_out && !icsk->icsk_pending)
 985                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
 986                                          icsk->icsk_rto, TCP_RTO_MAX);
 987}
 988
 989static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
 990{
 991        tp->snd_wl1 = seq;
 992}
 993
 994static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
 995{
 996        tp->snd_wl1 = seq;
 997}
 998
 999/*
1000 * Calculate(/check) TCP checksum
1001 */
1002static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1003                                   __be32 daddr, __wsum base)
1004{
1005        return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1006}
1007
1008static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
1009{
1010        return __skb_checksum_complete(skb);
1011}
1012
1013static inline bool tcp_checksum_complete(struct sk_buff *skb)
1014{
1015        return !skb_csum_unnecessary(skb) &&
1016                __tcp_checksum_complete(skb);
1017}
1018
1019/* Prequeue for VJ style copy to user, combined with checksumming. */
1020
1021static inline void tcp_prequeue_init(struct tcp_sock *tp)
1022{
1023        tp->ucopy.task = NULL;
1024        tp->ucopy.len = 0;
1025        tp->ucopy.memory = 0;
1026        skb_queue_head_init(&tp->ucopy.prequeue);
1027#ifdef CONFIG_NET_DMA
1028        tp->ucopy.dma_chan = NULL;
1029        tp->ucopy.wakeup = 0;
1030        tp->ucopy.pinned_list = NULL;
1031        tp->ucopy.dma_cookie = 0;
1032#endif
1033}
1034
1035bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1036
1037#undef STATE_TRACE
1038
1039#ifdef STATE_TRACE
1040static const char *statename[]={
1041        "Unused","Established","Syn Sent","Syn Recv",
1042        "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1043        "Close Wait","Last ACK","Listen","Closing"
1044};
1045#endif
1046void tcp_set_state(struct sock *sk, int state);
1047
1048void tcp_done(struct sock *sk);
1049
1050static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1051{
1052        rx_opt->dsack = 0;
1053        rx_opt->num_sacks = 0;
1054}
1055
1056u32 tcp_default_init_rwnd(u32 mss);
1057
1058/* Determine a window scaling and initial window to offer. */
1059void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
1060                               __u32 *window_clamp, int wscale_ok,
1061                               __u8 *rcv_wscale, __u32 init_rcv_wnd);
1062
1063static inline int tcp_win_from_space(int space)
1064{
1065        return sysctl_tcp_adv_win_scale<=0 ?
1066                (space>>(-sysctl_tcp_adv_win_scale)) :
1067                space - (space>>sysctl_tcp_adv_win_scale);
1068}
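/* Worked example (hypothetical values): with sysctl_tcp_adv_win_scale == 2
 * and 65536 bytes of receive space, the advertised window is
 * 65536 - (65536 >> 2) == 49152 bytes; the remaining quarter is left as
 * buffering overhead.  A negative scale would instead use the
 * space >> (-scale) form.
 */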
1069
1070/* Note: caller must be prepared to deal with negative returns */ 
1071static inline int tcp_space(const struct sock *sk)
1072{
1073        return tcp_win_from_space(sk->sk_rcvbuf -
1074                                  atomic_read(&sk->sk_rmem_alloc));
1075} 
1076
1077static inline int tcp_full_space(const struct sock *sk)
1078{
1079        return tcp_win_from_space(sk->sk_rcvbuf); 
1080}
1081
1082static inline void tcp_openreq_init(struct request_sock *req,
1083                                    struct tcp_options_received *rx_opt,
1084                                    struct sk_buff *skb)
1085{
1086        struct inet_request_sock *ireq = inet_rsk(req);
1087
1088        req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
1089        req->cookie_ts = 0;
1090        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1091        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1092        tcp_rsk(req)->snt_synack = 0;
1093        req->mss = rx_opt->mss_clamp;
1094        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1095        ireq->tstamp_ok = rx_opt->tstamp_ok;
1096        ireq->sack_ok = rx_opt->sack_ok;
1097        ireq->snd_wscale = rx_opt->snd_wscale;
1098        ireq->wscale_ok = rx_opt->wscale_ok;
1099        ireq->acked = 0;
1100        ireq->ecn_ok = 0;
1101        ireq->ir_rmt_port = tcp_hdr(skb)->source;
1102        ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
1103}
1104
1105void tcp_enter_memory_pressure(struct sock *sk);
1106
1107static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1108{
1109        return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1110}
1111
1112static inline int keepalive_time_when(const struct tcp_sock *tp)
1113{
1114        return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1115}
1116
1117static inline int keepalive_probes(const struct tcp_sock *tp)
1118{
1119        return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1120}
1121
1122static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1123{
1124        const struct inet_connection_sock *icsk = &tp->inet_conn;
1125
1126        return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1127                          tcp_time_stamp - tp->rcv_tstamp);
1128}
1129
1130static inline int tcp_fin_time(const struct sock *sk)
1131{
1132        int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1133        const int rto = inet_csk(sk)->icsk_rto;
1134
1135        if (fin_timeout < (rto << 2) - (rto >> 1))
1136                fin_timeout = (rto << 2) - (rto >> 1);
1137
1138        return fin_timeout;
1139}
1140
1141static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1142                                  int paws_win)
1143{
1144        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1145                return true;
1146        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1147                return true;
1148        /*
1149         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1150         * but the following TCP messages have valid values. Ignore the 0 value,
1151         * or else a 'negative' tsval might prevent us from accepting their packets.
1152         */
1153        if (!rx_opt->ts_recent)
1154                return true;
1155        return false;
1156}
1157
1158static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1159                                   int rst)
1160{
1161        if (tcp_paws_check(rx_opt, 0))
1162                return false;
1163
1164        /* RST segments are not recommended to carry a timestamp,
1165           and, if they do, it is recommended to ignore PAWS because
1166           "their cleanup function should take precedence over timestamps."
1167           Certainly, it is a mistake. It is necessary to understand the reasons
1168           for this constraint in order to relax it: if the peer reboots, its clock
1169           may go out of sync and half-open connections will not be reset.
1170           Actually, the problem would not exist if all
1171           implementations followed the draft about maintaining clocks
1172           across reboots. Linux-2.2 DOES NOT!
1173
1174           However, we can relax time bounds for RST segments to MSL.
1175         */
1176        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1177                return false;
1178        return true;
1179}
1180
1181static inline void tcp_mib_init(struct net *net)
1182{
1183        /* See RFC 2012 */
1184        TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1185        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1186        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1187        TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
1188}
1189
1190/* from STCP */
1191static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1192{
1193        tp->lost_skb_hint = NULL;
1194}
1195
1196static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1197{
1198        tcp_clear_retrans_hints_partial(tp);
1199        tp->retransmit_skb_hint = NULL;
1200}
1201
1202/* MD5 Signature */
1203struct crypto_hash;
1204
1205union tcp_md5_addr {
1206        struct in_addr  a4;
1207#if IS_ENABLED(CONFIG_IPV6)
1208        struct in6_addr a6;
1209#endif
1210};
1211
1212/* - key database */
1213struct tcp_md5sig_key {
1214        struct hlist_node       node;
1215        u8                      keylen;
1216        u8                      family; /* AF_INET or AF_INET6 */
1217        union tcp_md5_addr      addr;
1218        u8                      key[TCP_MD5SIG_MAXKEYLEN];
1219        struct rcu_head         rcu;
1220};
1221
1222/* - sock block */
1223struct tcp_md5sig_info {
1224        struct hlist_head       head;
1225        struct rcu_head         rcu;
1226};
1227
1228/* - pseudo header */
1229struct tcp4_pseudohdr {
1230        __be32          saddr;
1231        __be32          daddr;
1232        __u8            pad;
1233        __u8            protocol;
1234        __be16          len;
1235};
1236
1237struct tcp6_pseudohdr {
1238        struct in6_addr saddr;
1239        struct in6_addr daddr;
1240        __be32          len;
1241        __be32          protocol;       /* including padding */
1242};
1243
1244union tcp_md5sum_block {
1245        struct tcp4_pseudohdr ip4;
1246#if IS_ENABLED(CONFIG_IPV6)
1247        struct tcp6_pseudohdr ip6;
1248#endif
1249};
1250
1251/* - pool: digest algorithm, hash description and scratch buffer */
1252struct tcp_md5sig_pool {
1253        struct hash_desc        md5_desc;
1254        union tcp_md5sum_block  md5_blk;
1255};
1256
1257/* - functions */
1258int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1259                        const struct sock *sk, const struct request_sock *req,
1260                        const struct sk_buff *skb);
1261int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1262                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1263int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1264                   int family);
1265struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1266                                         struct sock *addr_sk);
1267
1268#ifdef CONFIG_TCP_MD5SIG
1269struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1270                                         const union tcp_md5_addr *addr,
1271                                         int family);
1272#define tcp_twsk_md5_key(twsk)  ((twsk)->tw_md5_key)
1273#else
1274static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1275                                         const union tcp_md5_addr *addr,
1276                                         int family)
1277{
1278        return NULL;
1279}
1280#define tcp_twsk_md5_key(twsk)  NULL
1281#endif
1282
1283bool tcp_alloc_md5sig_pool(void);
1284
1285struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1286static inline void tcp_put_md5sig_pool(void)
1287{
1288        local_bh_enable();
1289}
1290
1291int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1292int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1293                          unsigned int header_len);
1294int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1295                     const struct tcp_md5sig_key *key);
1296
1297/* From tcp_fastopen.c */
1298void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1299                            struct tcp_fastopen_cookie *cookie, int *syn_loss,
1300                            unsigned long *last_syn_loss);
1301void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1302                            struct tcp_fastopen_cookie *cookie, bool syn_lost);
1303struct tcp_fastopen_request {
1304        /* Fast Open cookie. Size 0 means a cookie request */
1305        struct tcp_fastopen_cookie      cookie;
1306        struct msghdr                   *data;  /* data in MSG_FASTOPEN */
1307        size_t                          size;
1308        int                             copied; /* queued in tcp_connect() */
1309};
1310void tcp_free_fastopen_req(struct tcp_sock *tp);
1311
1312extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1313int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1314void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
1315                             struct tcp_fastopen_cookie *foc);
1316void tcp_fastopen_init_key_once(bool publish);
1317#define TCP_FASTOPEN_KEY_LENGTH 16
1318
1319/* Fastopen key context */
1320struct tcp_fastopen_context {
1321        struct crypto_cipher    *tfm;
1322        __u8                    key[TCP_FASTOPEN_KEY_LENGTH];
1323        struct rcu_head         rcu;
1324};
1325
1326/* write queue abstraction */
1327static inline void tcp_write_queue_purge(struct sock *sk)
1328{
1329        struct sk_buff *skb;
1330
1331        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1332                sk_wmem_free_skb(sk, skb);
1333        sk_mem_reclaim(sk);
1334        tcp_clear_all_retrans_hints(tcp_sk(sk));
1335}
1336
1337static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1338{
1339        return skb_peek(&sk->sk_write_queue);
1340}
1341
1342static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1343{
1344        return skb_peek_tail(&sk->sk_write_queue);
1345}
1346
1347static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1348                                                   const struct sk_buff *skb)
1349{
1350        return skb_queue_next(&sk->sk_write_queue, skb);
1351}
1352
1353static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1354                                                   const struct sk_buff *skb)
1355{
1356        return skb_queue_prev(&sk->sk_write_queue, skb);
1357}
1358
1359#define tcp_for_write_queue(skb, sk)                                    \
1360        skb_queue_walk(&(sk)->sk_write_queue, skb)
1361
1362#define tcp_for_write_queue_from(skb, sk)                               \
1363        skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1364
1365#define tcp_for_write_queue_from_safe(skb, tmp, sk)                     \
1366        skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1367
1368static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1369{
1370        return sk->sk_send_head;
1371}
1372
1373static inline bool tcp_skb_is_last(const struct sock *sk,
1374                                   const struct sk_buff *skb)
1375{
1376        return skb_queue_is_last(&sk->sk_write_queue, skb);
1377}
1378
1379static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1380{
1381        if (tcp_skb_is_last(sk, skb))
1382                sk->sk_send_head = NULL;
1383        else
1384                sk->sk_send_head = tcp_write_queue_next(sk, skb);
1385}
1386
1387static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1388{
1389        if (sk->sk_send_head == skb_unlinked)
1390                sk->sk_send_head = NULL;
1391}
1392
1393static inline void tcp_init_send_head(struct sock *sk)
1394{
1395        sk->sk_send_head = NULL;
1396}
1397
1398static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1399{
1400        __skb_queue_tail(&sk->sk_write_queue, skb);
1401}
1402
1403static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1404{
1405        __tcp_add_write_queue_tail(sk, skb);
1406
1407        /* Queue it, remembering where we must start sending. */
1408        if (sk->sk_send_head == NULL) {
1409                sk->sk_send_head = skb;
1410
1411                if (tcp_sk(sk)->highest_sack == NULL)
1412                        tcp_sk(sk)->highest_sack = skb;
1413        }
1414}
1415
1416static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1417{
1418        __skb_queue_head(&sk->sk_write_queue, skb);
1419}
1420
1421/* Insert buff after skb on the write queue of sk.  */
1422static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1423                                                struct sk_buff *buff,
1424                                                struct sock *sk)
1425{
1426        __skb_queue_after(&sk->sk_write_queue, skb, buff);
1427}
1428
1429/* Insert new before skb on the write queue of sk.  */
1430static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1431                                                  struct sk_buff *skb,
1432                                                  struct sock *sk)
1433{
1434        __skb_queue_before(&sk->sk_write_queue, skb, new);
1435
1436        if (sk->sk_send_head == skb)
1437                sk->sk_send_head = new;
1438}
1439
1440static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1441{
1442        __skb_unlink(skb, &sk->sk_write_queue);
1443}
1444
1445static inline bool tcp_write_queue_empty(struct sock *sk)
1446{
1447        return skb_queue_empty(&sk->sk_write_queue);
1448}
1449
1450static inline void tcp_push_pending_frames(struct sock *sk)
1451{
1452        if (tcp_send_head(sk)) {
1453                struct tcp_sock *tp = tcp_sk(sk);
1454
1455                __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1456        }
1457}
1458
1459/* Start sequence of the skb just after the highest skb with SACKed
1460 * bit, valid only if sacked_out > 0 or when the caller has ensured
1461 * validity by itself.
1462 */
1463static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1464{
1465        if (!tp->sacked_out)
1466                return tp->snd_una;
1467
1468        if (tp->highest_sack == NULL)
1469                return tp->snd_nxt;
1470
1471        return TCP_SKB_CB(tp->highest_sack)->seq;
1472}
1473
1474static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1475{
1476        tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1477                                                tcp_write_queue_next(sk, skb);
1478}
1479
1480static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1481{
1482        return tcp_sk(sk)->highest_sack;
1483}
1484
1485static inline void tcp_highest_sack_reset(struct sock *sk)
1486{
1487        tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1488}
1489
1490/* Called when old skb is about to be deleted (to be combined with new skb) */
1491static inline void tcp_highest_sack_combine(struct sock *sk,
1492                                            struct sk_buff *old,
1493                                            struct sk_buff *new)
1494{
1495        if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1496                tcp_sk(sk)->highest_sack = new;
1497}
1498
1499/* Determines whether this is a thin stream (which may suffer from
1500 * increased latency). Used to trigger latency-reducing mechanisms.
1501 */
1502static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1503{
1504        return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1505}
1506
1507/* /proc */
1508enum tcp_seq_states {
1509        TCP_SEQ_STATE_LISTENING,
1510        TCP_SEQ_STATE_OPENREQ,
1511        TCP_SEQ_STATE_ESTABLISHED,
1512};
1513
1514int tcp_seq_open(struct inode *inode, struct file *file);
1515
1516struct tcp_seq_afinfo {
1517        char                            *name;
1518        sa_family_t                     family;
1519        const struct file_operations    *seq_fops;
1520        struct seq_operations           seq_ops;
1521};
1522
1523struct tcp_iter_state {
1524        struct seq_net_private  p;
1525        sa_family_t             family;
1526        enum tcp_seq_states     state;
1527        struct sock             *syn_wait_sk;
1528        int                     bucket, offset, sbucket, num;
1529        kuid_t                  uid;
1530        loff_t                  last_pos;
1531};
1532
1533int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1534void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1535
1536extern struct request_sock_ops tcp_request_sock_ops;
1537extern struct request_sock_ops tcp6_request_sock_ops;
1538
1539void tcp_v4_destroy_sock(struct sock *sk);
1540
1541struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1542                                netdev_features_t features);
1543struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1544int tcp_gro_complete(struct sk_buff *skb);
1545
1546void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1547
1548static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1549{
1550        return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
1551}
1552
1553static inline bool tcp_stream_memory_free(const struct sock *sk)
1554{
1555        const struct tcp_sock *tp = tcp_sk(sk);
1556        u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1557
1558        return notsent_bytes < tcp_notsent_lowat(tp);
1559}
1560
1561#ifdef CONFIG_PROC_FS
1562int tcp4_proc_init(void);
1563void tcp4_proc_exit(void);
1564#endif
1565
1566/* TCP af-specific functions */
1567struct tcp_sock_af_ops {
1568#ifdef CONFIG_TCP_MD5SIG
1569        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
1570                                                struct sock *addr_sk);
1571        int                     (*calc_md5_hash) (char *location,
1572                                                  struct tcp_md5sig_key *md5,
1573                                                  const struct sock *sk,
1574                                                  const struct request_sock *req,
1575                                                  const struct sk_buff *skb);
1576        int                     (*md5_parse) (struct sock *sk,
1577                                              char __user *optval,
1578                                              int optlen);
1579#endif
1580};
1581
1582struct tcp_request_sock_ops {
1583#ifdef CONFIG_TCP_MD5SIG
1584        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
1585                                                struct request_sock *req);
1586        int                     (*calc_md5_hash) (char *location,
1587                                                  struct tcp_md5sig_key *md5,
1588                                                  const struct sock *sk,
1589                                                  const struct request_sock *req,
1590                                                  const struct sk_buff *skb);
1591#endif
1592};
1593
1594int tcpv4_offload_init(void);
1595
1596void tcp_v4_init(void);
1597void tcp_init(void);
1598
1599#endif  /* _TCP_H */
1600