linux/include/net/tcp.h
   1/*
   2 * INET         An implementation of the TCP/IP protocol suite for the LINUX
   3 *              operating system.  INET is implemented using the  BSD Socket
   4 *              interface as the means of communication with the user level.
   5 *
   6 *              Definitions for the TCP module.
   7 *
   8 * Version:     @(#)tcp.h       1.0.5   05/23/93
   9 *
  10 * Authors:     Ross Biro
  11 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  12 *
  13 *              This program is free software; you can redistribute it and/or
  14 *              modify it under the terms of the GNU General Public License
  15 *              as published by the Free Software Foundation; either version
  16 *              2 of the License, or (at your option) any later version.
  17 */
  18#ifndef _TCP_H
  19#define _TCP_H
  20
  21#define FASTRETRANS_DEBUG 1
  22
  23#include <linux/list.h>
  24#include <linux/tcp.h>
  25#include <linux/bug.h>
  26#include <linux/slab.h>
  27#include <linux/cache.h>
  28#include <linux/percpu.h>
  29#include <linux/skbuff.h>
  30#include <linux/dmaengine.h>
  31#include <linux/crypto.h>
  32#include <linux/cryptohash.h>
  33#include <linux/kref.h>
  34#include <linux/ktime.h>
  35
  36#include <net/inet_connection_sock.h>
  37#include <net/inet_timewait_sock.h>
  38#include <net/inet_hashtables.h>
  39#include <net/checksum.h>
  40#include <net/request_sock.h>
  41#include <net/sock.h>
  42#include <net/snmp.h>
  43#include <net/ip.h>
  44#include <net/tcp_states.h>
  45#include <net/inet_ecn.h>
  46#include <net/dst.h>
  47
  48#include <linux/seq_file.h>
  49#include <linux/memcontrol.h>
  50
  51extern struct inet_hashinfo tcp_hashinfo;
  52
  53extern struct percpu_counter tcp_orphan_count;
  54void tcp_time_wait(struct sock *sk, int state, int timeo);
  55
  56#define MAX_TCP_HEADER  (128 + MAX_HEADER)
  57#define MAX_TCP_OPTION_SPACE 40
  58
  59/* 
  60 * Never offer a window over 32767 without using window scaling. Some
   61 * poor stacks do signed 16-bit maths!
  62 */
  63#define MAX_TCP_WINDOW          32767U
  64
  65/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
  66#define TCP_MIN_MSS             88U
  67
  68/* The least MTU to use for probing */
  69#define TCP_BASE_MSS            512
  70
  71/* After receiving this amount of duplicate ACKs fast retransmit starts. */
  72#define TCP_FASTRETRANS_THRESH 3
  73
  74/* Maximal reordering. */
  75#define TCP_MAX_REORDERING      127
  76
  77/* Maximal number of ACKs sent quickly to accelerate slow-start. */
  78#define TCP_MAX_QUICKACKS       16U
  79
  80/* urg_data states */
  81#define TCP_URG_VALID   0x0100
  82#define TCP_URG_NOTYET  0x0200
  83#define TCP_URG_READ    0x0400
  84
  85#define TCP_RETR1       3       /*
  86                                 * This is how many retries it does before it
  87                                 * tries to figure out if the gateway is
  88                                 * down. Minimal RFC value is 3; it corresponds
  89                                 * to ~3sec-8min depending on RTO.
  90                                 */
  91
  92#define TCP_RETR2       15      /*
  93                                 * This should take at least
  94                                 * 90 minutes to time out.
  95                                 * RFC1122 says that the limit is 100 sec.
  96                                 * 15 is ~13-30min depending on RTO.
  97                                 */
  98
  99#define TCP_SYN_RETRIES  6      /* This is how many retries are done
  100                                 * when actively opening a connection.
  101                                 * RFC1122 says the minimum retry MUST
  102                                 * be at least 180secs.  Nevertheless
  103                                 * this value corresponds to
 104                                 * 63secs of retransmission with the
 105                                 * current initial RTO.
 106                                 */
 107
  108#define TCP_SYNACK_RETRIES 5    /* This is how many retries are done
  109                                 * when passively opening a connection.
  110                                 * This corresponds to 31secs of
 111                                 * retransmission with the current
 112                                 * initial RTO.
 113                                 */
 114
 115#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 116                                  * state, about 60 seconds     */
 117#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
 118                                 /* BSD style FIN_WAIT2 deadlock breaker.
 119                                  * It used to be 3min, new value is 60sec,
 120                                  * to combine FIN-WAIT-2 timeout with
 121                                  * TIME-WAIT timer.
 122                                  */
 123
 124#define TCP_DELACK_MAX  ((unsigned)(HZ/5))      /* maximal time to delay before sending an ACK */
 125#if HZ >= 100
 126#define TCP_DELACK_MIN  ((unsigned)(HZ/25))     /* minimal time to delay before sending an ACK */
 127#define TCP_ATO_MIN     ((unsigned)(HZ/25))
 128#else
 129#define TCP_DELACK_MIN  4U
 130#define TCP_ATO_MIN     4U
 131#endif
 132#define TCP_RTO_MAX     ((unsigned)(120*HZ))
 133#define TCP_RTO_MIN     ((unsigned)(HZ/5))
 134#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))     /* RFC6298 2.1 initial RTO value        */
 135#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
 136                                                 * used as a fallback RTO for the
 137                                                 * initial data transmission if no
 138                                                 * valid RTT sample has been acquired,
 139                                                 * most likely due to retrans in 3WHS.
 140                                                 */
 141
 142#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
 143                                                         * for local resources.
 144                                                         */
 145
 146#define TCP_KEEPALIVE_TIME      (120*60*HZ)     /* two hours */
 147#define TCP_KEEPALIVE_PROBES    9               /* Max of 9 keepalive probes    */
 148#define TCP_KEEPALIVE_INTVL     (75*HZ)
 149
 150#define MAX_TCP_KEEPIDLE        32767
 151#define MAX_TCP_KEEPINTVL       32767
 152#define MAX_TCP_KEEPCNT         127
 153#define MAX_TCP_SYNCNT          127
 154
 155#define TCP_SYNQ_INTERVAL       (HZ/5)  /* Period of SYNACK timer */
 156
 157#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
 158#define TCP_PAWS_MSL    60              /* Per-host timestamps are invalidated
  159                                         * after this time. It should be equal
  160                                         * to (or greater than) TCP_TIMEWAIT_LEN
  161                                         * to provide reliability equal to that
  162                                         * provided by the timewait state.
 163                                         */
 164#define TCP_PAWS_WINDOW 1               /* Replay window for per-host
 165                                         * timestamps. It must be less than
 166                                         * minimal timewait lifetime.
 167                                         */
 168/*
 169 *      TCP option
 170 */
 171 
 172#define TCPOPT_NOP              1       /* Padding */
 173#define TCPOPT_EOL              0       /* End of options */
 174#define TCPOPT_MSS              2       /* Segment size negotiating */
 175#define TCPOPT_WINDOW           3       /* Window scaling */
 176#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
 177#define TCPOPT_SACK             5       /* SACK Block */
 178#define TCPOPT_TIMESTAMP        8       /* Better RTT estimations/PAWS */
 179#define TCPOPT_MD5SIG           19      /* MD5 Signature (RFC2385) */
 180#define TCPOPT_EXP              254     /* Experimental */
 181/* Magic number to be after the option value for sharing TCP
 182 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 183 */
 184#define TCPOPT_FASTOPEN_MAGIC   0xF989
 185
 186/*
 187 *     TCP option lengths
 188 */
 189
 190#define TCPOLEN_MSS            4
 191#define TCPOLEN_WINDOW         3
 192#define TCPOLEN_SACK_PERM      2
 193#define TCPOLEN_TIMESTAMP      10
 194#define TCPOLEN_MD5SIG         18
 195#define TCPOLEN_EXP_FASTOPEN_BASE  4
 196
 197/* But this is what stacks really send out. */
 198#define TCPOLEN_TSTAMP_ALIGNED          12
 199#define TCPOLEN_WSCALE_ALIGNED          4
 200#define TCPOLEN_SACKPERM_ALIGNED        4
 201#define TCPOLEN_SACK_BASE               2
 202#define TCPOLEN_SACK_BASE_ALIGNED       4
 203#define TCPOLEN_SACK_PERBLOCK           8
 204#define TCPOLEN_MD5SIG_ALIGNED          20
 205#define TCPOLEN_MSS_ALIGNED             4
 206
 207/* Flags in tp->nonagle */
 208#define TCP_NAGLE_OFF           1       /* Nagle's algo is disabled */
 209#define TCP_NAGLE_CORK          2       /* Socket is corked         */
 210#define TCP_NAGLE_PUSH          4       /* Cork is overridden for already queued data */
 211
 212/* TCP thin-stream limits */
 213#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 214
 215/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
 216#define TCP_INIT_CWND           10
 217
 218/* Bit Flags for sysctl_tcp_fastopen */
 219#define TFO_CLIENT_ENABLE       1
 220#define TFO_SERVER_ENABLE       2
 221#define TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */
 222
 223/* Accept SYN data w/o any cookie option */
 224#define TFO_SERVER_COOKIE_NOT_REQD      0x200
 225
 226/* Force enable TFO on all listeners, i.e., not requiring the
 227 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 228 */
 229#define TFO_SERVER_WO_SOCKOPT1  0x400
 230#define TFO_SERVER_WO_SOCKOPT2  0x800
 231
 232extern struct inet_timewait_death_row tcp_death_row;
 233
 234/* sysctl variables for tcp */
 235extern int sysctl_tcp_timestamps;
 236extern int sysctl_tcp_window_scaling;
 237extern int sysctl_tcp_sack;
 238extern int sysctl_tcp_fin_timeout;
 239extern int sysctl_tcp_keepalive_time;
 240extern int sysctl_tcp_keepalive_probes;
 241extern int sysctl_tcp_keepalive_intvl;
 242extern int sysctl_tcp_syn_retries;
 243extern int sysctl_tcp_synack_retries;
 244extern int sysctl_tcp_retries1;
 245extern int sysctl_tcp_retries2;
 246extern int sysctl_tcp_orphan_retries;
 247extern int sysctl_tcp_syncookies;
 248extern int sysctl_tcp_fastopen;
 249extern int sysctl_tcp_retrans_collapse;
 250extern int sysctl_tcp_stdurg;
 251extern int sysctl_tcp_rfc1337;
 252extern int sysctl_tcp_abort_on_overflow;
 253extern int sysctl_tcp_max_orphans;
 254extern int sysctl_tcp_fack;
 255extern int sysctl_tcp_reordering;
 256extern int sysctl_tcp_dsack;
 257extern long sysctl_tcp_mem[3];
 258extern int sysctl_tcp_wmem[3];
 259extern int sysctl_tcp_rmem[3];
 260extern int sysctl_tcp_app_win;
 261extern int sysctl_tcp_adv_win_scale;
 262extern int sysctl_tcp_tw_reuse;
 263extern int sysctl_tcp_frto;
 264extern int sysctl_tcp_low_latency;
 265extern int sysctl_tcp_dma_copybreak;
 266extern int sysctl_tcp_nometrics_save;
 267extern int sysctl_tcp_moderate_rcvbuf;
 268extern int sysctl_tcp_tso_win_divisor;
 269extern int sysctl_tcp_mtu_probing;
 270extern int sysctl_tcp_base_mss;
 271extern int sysctl_tcp_workaround_signed_windows;
 272extern int sysctl_tcp_slow_start_after_idle;
 273extern int sysctl_tcp_thin_linear_timeouts;
 274extern int sysctl_tcp_thin_dupack;
 275extern int sysctl_tcp_early_retrans;
 276extern int sysctl_tcp_limit_output_bytes;
 277extern int sysctl_tcp_challenge_ack_limit;
 278extern unsigned int sysctl_tcp_notsent_lowat;
 279extern int sysctl_tcp_min_tso_segs;
 280extern int sysctl_tcp_autocorking;
 281
 282extern atomic_long_t tcp_memory_allocated;
 283extern struct percpu_counter tcp_sockets_allocated;
 284extern int tcp_memory_pressure;
 285
 286/*
 287 * The next routines deal with comparing 32 bit unsigned ints
 288 * and worry about wraparound (automatic with unsigned arithmetic).
 289 */
 290
 291static inline bool before(__u32 seq1, __u32 seq2)
 292{
 293        return (__s32)(seq1-seq2) < 0;
 294}
 295#define after(seq2, seq1)       before(seq1, seq2)
 296
 297/* is s2<=s1<=s3 ? */
 298static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
 299{
 300        return seq3 - seq2 >= seq1 - seq2;
 301}
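
/* Illustrative example (not part of the kernel source): before()/after()
 * compare sequence numbers with a signed 32-bit subtraction, so they stay
 * correct across the 2^32 wrap.  With made-up values straddling the wrap:
 *
 *	u32 una = 0xfffffff0U;
 *	u32 nxt = 0x00000010U;
 *
 * before(una, nxt) and after(nxt, una) are both true, since
 * (s32)(una - nxt) == -32, and between(0xfffffff8U, una, nxt) is true
 * because 0xfffffff8 lies within [una, nxt] despite the wrap.
 */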
 302
 303static inline bool tcp_out_of_memory(struct sock *sk)
 304{
 305        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 306            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
 307                return true;
 308        return false;
 309}
 310
 311static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 312{
 313        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
 314        int orphans = percpu_counter_read_positive(ocp);
 315
 316        if (orphans << shift > sysctl_tcp_max_orphans) {
 317                orphans = percpu_counter_sum_positive(ocp);
 318                if (orphans << shift > sysctl_tcp_max_orphans)
 319                        return true;
 320        }
 321        return false;
 322}
 323
 324bool tcp_check_oom(struct sock *sk, int shift);
 325
 326/* syncookies: remember time of last synqueue overflow */
 327static inline void tcp_synq_overflow(struct sock *sk)
 328{
 329        tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
 330}
 331
 332/* syncookies: no recent synqueue overflow on this listening socket? */
 333static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
 334{
 335        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
 336        return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
 337}
 338
 339extern struct proto tcp_prot;
 340
 341#define TCP_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 342#define TCP_INC_STATS_BH(net, field)    SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 343#define TCP_DEC_STATS(net, field)       SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
 344#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 345#define TCP_ADD_STATS(net, field, val)  SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 346
 347void tcp_tasklet_init(void);
 348
 349void tcp_v4_err(struct sk_buff *skb, u32);
 350
 351void tcp_shutdown(struct sock *sk, int how);
 352
 353void tcp_v4_early_demux(struct sk_buff *skb);
 354int tcp_v4_rcv(struct sk_buff *skb);
 355
 356int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 357int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 358                size_t size);
 359int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 360                 int flags);
 361void tcp_release_cb(struct sock *sk);
 362void tcp_wfree(struct sk_buff *skb);
 363void tcp_write_timer_handler(struct sock *sk);
 364void tcp_delack_timer_handler(struct sock *sk);
 365int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 366int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 367                          const struct tcphdr *th, unsigned int len);
 368void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 369                         const struct tcphdr *th, unsigned int len);
 370void tcp_rcv_space_adjust(struct sock *sk);
 371void tcp_cleanup_rbuf(struct sock *sk, int copied);
 372int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 373void tcp_twsk_destructor(struct sock *sk);
 374ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
 375                        struct pipe_inode_info *pipe, size_t len,
 376                        unsigned int flags);
 377
 378static inline void tcp_dec_quickack_mode(struct sock *sk,
 379                                         const unsigned int pkts)
 380{
 381        struct inet_connection_sock *icsk = inet_csk(sk);
 382
 383        if (icsk->icsk_ack.quick) {
 384                if (pkts >= icsk->icsk_ack.quick) {
 385                        icsk->icsk_ack.quick = 0;
 386                        /* Leaving quickack mode we deflate ATO. */
 387                        icsk->icsk_ack.ato   = TCP_ATO_MIN;
 388                } else
 389                        icsk->icsk_ack.quick -= pkts;
 390        }
 391}
 392
 393#define TCP_ECN_OK              1
 394#define TCP_ECN_QUEUE_CWR       2
 395#define TCP_ECN_DEMAND_CWR      4
 396#define TCP_ECN_SEEN            8
 397
 398enum tcp_tw_status {
 399        TCP_TW_SUCCESS = 0,
 400        TCP_TW_RST = 1,
 401        TCP_TW_ACK = 2,
 402        TCP_TW_SYN = 3
 403};
 404
 405
 406enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
 407                                              struct sk_buff *skb,
 408                                              const struct tcphdr *th);
 409struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 410                           struct request_sock *req, struct request_sock **prev,
 411                           bool fastopen);
 412int tcp_child_process(struct sock *parent, struct sock *child,
 413                      struct sk_buff *skb);
 414void tcp_enter_loss(struct sock *sk, int how);
 415void tcp_clear_retrans(struct tcp_sock *tp);
 416void tcp_update_metrics(struct sock *sk);
 417void tcp_init_metrics(struct sock *sk);
 418void tcp_metrics_init(void);
 419bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 420                        bool paws_check);
 421bool tcp_remember_stamp(struct sock *sk);
 422bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
 423void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 424void tcp_disable_fack(struct tcp_sock *tp);
 425void tcp_close(struct sock *sk, long timeout);
 426void tcp_init_sock(struct sock *sk);
 427unsigned int tcp_poll(struct file *file, struct socket *sock,
 428                      struct poll_table_struct *wait);
 429int tcp_getsockopt(struct sock *sk, int level, int optname,
 430                   char __user *optval, int __user *optlen);
 431int tcp_setsockopt(struct sock *sk, int level, int optname,
 432                   char __user *optval, unsigned int optlen);
 433int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
 434                          char __user *optval, int __user *optlen);
 435int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
 436                          char __user *optval, unsigned int optlen);
 437void tcp_set_keepalive(struct sock *sk, int val);
 438void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
 439int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 440                size_t len, int nonblock, int flags, int *addr_len);
 441void tcp_parse_options(const struct sk_buff *skb,
 442                       struct tcp_options_received *opt_rx,
 443                       int estab, struct tcp_fastopen_cookie *foc);
 444const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 445
 446/*
 447 *      TCP v4 functions exported for the inet6 API
 448 */
 449
 450void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 451int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 452struct sock *tcp_create_openreq_child(struct sock *sk,
 453                                      struct request_sock *req,
 454                                      struct sk_buff *skb);
 455struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 456                                  struct request_sock *req,
 457                                  struct dst_entry *dst);
 458int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 459int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 460int tcp_connect(struct sock *sk);
 461struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 462                                struct request_sock *req,
 463                                struct tcp_fastopen_cookie *foc);
 464int tcp_disconnect(struct sock *sk, int flags);
 465
 466void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
 467int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 468void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 469
 470/* From syncookies.c */
 471int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 472                      u32 cookie);
 473struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
 474                             struct ip_options *opt);
 475#ifdef CONFIG_SYN_COOKIES
 476
 477/* Syncookies use a monotonic timer which increments every 60 seconds.
 478 * This counter is used both as a hash input and partially encoded into
 479 * the cookie value.  A cookie is only validated further if the delta
 480 * between the current counter value and the encoded one is less than this,
 481 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 482 * the counter advances immediately after a cookie is generated).
 483 */
 484#define MAX_SYNCOOKIE_AGE 2
 485
 486static inline u32 tcp_cookie_time(void)
 487{
 488        u64 val = get_jiffies_64();
 489
 490        do_div(val, 60 * HZ);
 491        return val;
 492}
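
/* Illustrative arithmetic (values implied by the comment above): the counter
 * advances once per 60 * HZ jiffies and MAX_SYNCOOKIE_AGE is 2, so a cookie
 * generated while tcp_cookie_time() returns N is still accepted while it
 * returns N or N + 1, i.e. for at most 2 * 60 = 120 seconds and for only
 * slightly more than 60 seconds in the worst case, when the counter ticks
 * right after the cookie was generated.
 */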
 493
 494u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 495                              u16 *mssp);
 496__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
 497#else
 498static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 499                                            struct sk_buff *skb,
 500                                            __u16 *mss)
 501{
 502        return 0;
 503}
 504#endif
 505
 506__u32 cookie_init_timestamp(struct request_sock *req);
 507bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
 508                            bool *ecn_ok);
 509
 510/* From net/ipv6/syncookies.c */
 511int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 512                      u32 cookie);
 513struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 514#ifdef CONFIG_SYN_COOKIES
 515u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
 516                              const struct tcphdr *th, u16 *mssp);
 517__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
 518                              __u16 *mss);
 519#else
 520static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 521                                            struct sk_buff *skb,
 522                                            __u16 *mss)
 523{
 524        return 0;
 525}
 526#endif
 527/* tcp_output.c */
 528
 529void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
 530                               int nonagle);
 531bool tcp_may_send_now(struct sock *sk);
 532int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
 533int tcp_retransmit_skb(struct sock *, struct sk_buff *);
 534void tcp_retransmit_timer(struct sock *sk);
 535void tcp_xmit_retransmit_queue(struct sock *);
 536void tcp_simple_retransmit(struct sock *);
 537int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 538int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
 539
 540void tcp_send_probe0(struct sock *);
 541void tcp_send_partial(struct sock *);
 542int tcp_write_wakeup(struct sock *);
 543void tcp_send_fin(struct sock *sk);
 544void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 545int tcp_send_synack(struct sock *);
 546bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
 547                          const char *proto);
 548void tcp_push_one(struct sock *, unsigned int mss_now);
 549void tcp_send_ack(struct sock *sk);
 550void tcp_send_delayed_ack(struct sock *sk);
 551void tcp_send_loss_probe(struct sock *sk);
 552bool tcp_schedule_loss_probe(struct sock *sk);
 553
 554/* tcp_input.c */
 555void tcp_resume_early_retransmit(struct sock *sk);
 556void tcp_rearm_rto(struct sock *sk);
 557void tcp_reset(struct sock *sk);
 558
 559/* tcp_timer.c */
 560void tcp_init_xmit_timers(struct sock *);
 561static inline void tcp_clear_xmit_timers(struct sock *sk)
 562{
 563        inet_csk_clear_xmit_timers(sk);
 564}
 565
 566unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 567unsigned int tcp_current_mss(struct sock *sk);
 568
  569/* Bound MSS / TSO packet size to half of the window */
 570static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 571{
 572        int cutoff;
 573
 574        /* When peer uses tiny windows, there is no use in packetizing
 575         * to sub-MSS pieces for the sake of SWS or making sure there
 576         * are enough packets in the pipe for fast recovery.
 577         *
 578         * On the other hand, for extremely large MSS devices, handling
 579         * smaller than MSS windows in this way does make sense.
 580         */
 581        if (tp->max_window >= 512)
 582                cutoff = (tp->max_window >> 1);
 583        else
 584                cutoff = tp->max_window;
 585
 586        if (cutoff && pktsize > cutoff)
 587                return max_t(int, cutoff, 68U - tp->tcp_header_len);
 588        else
 589                return pktsize;
 590}
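
/* Illustrative arithmetic (made-up values): with tp->max_window == 4096 the
 * cutoff is 2048, so a 2920-byte TSO chunk is bounded to
 * max(2048, 68 - tcp_header_len) == 2048.  A peer stuck at max_window == 300
 * (< 512) gets a cutoff of the whole 300-byte window, so we never packetize
 * below that tiny window.
 */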
 591
 592/* tcp.c */
 593void tcp_get_info(const struct sock *, struct tcp_info *);
 594
 595/* Read 'sendfile()'-style from a TCP socket */
 596typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
 597                                unsigned int, size_t);
 598int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 599                  sk_read_actor_t recv_actor);
 600
 601void tcp_initialize_rcv_mss(struct sock *sk);
 602
 603int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 604int tcp_mss_to_mtu(struct sock *sk, int mss);
 605void tcp_mtup_init(struct sock *sk);
 606void tcp_init_buffer_space(struct sock *sk);
 607
 608static inline void tcp_bound_rto(const struct sock *sk)
 609{
 610        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
 611                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
 612}
 613
 614static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 615{
 616        return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 617}
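
/* Illustrative arithmetic (made-up sample): srtt_us stores 8 * SRTT, so with
 * srtt_us == 800000 (SRTT of 100 ms) and rttvar_us == 50000 (50 ms) this
 * yields usecs_to_jiffies(100000 + 50000), i.e. an RTO of 150 ms, which
 * tcp_bound_rto() above then caps at TCP_RTO_MAX.
 */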
 618
 619static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 620{
 621        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
 622                               ntohl(TCP_FLAG_ACK) |
 623                               snd_wnd);
 624}
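
/* Illustrative note: tcp_header_len is in bytes, so tcp_header_len << 26
 * equals (tcp_header_len / 4) << 28, i.e. the doff field of the header.
 * pred_flags therefore packs doff, the ACK flag and the expected receive
 * window into the same 32-bit layout as the doff/flags/window word of an
 * incoming header, letting the receive fast path test all three at once.
 */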
 625
 626static inline void tcp_fast_path_on(struct tcp_sock *tp)
 627{
 628        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
 629}
 630
 631static inline void tcp_fast_path_check(struct sock *sk)
 632{
 633        struct tcp_sock *tp = tcp_sk(sk);
 634
 635        if (skb_queue_empty(&tp->out_of_order_queue) &&
 636            tp->rcv_wnd &&
 637            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 638            !tp->urg_data)
 639                tcp_fast_path_on(tp);
 640}
 641
 642/* Compute the actual rto_min value */
 643static inline u32 tcp_rto_min(struct sock *sk)
 644{
 645        const struct dst_entry *dst = __sk_dst_get(sk);
 646        u32 rto_min = TCP_RTO_MIN;
 647
 648        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
 649                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
 650        return rto_min;
 651}
 652
 653static inline u32 tcp_rto_min_us(struct sock *sk)
 654{
 655        return jiffies_to_usecs(tcp_rto_min(sk));
 656}
 657
 658/* Compute the actual receive window we are currently advertising.
  659 * Rcv_nxt can be after the window if our peer pushes more data
 660 * than the offered window.
 661 */
 662static inline u32 tcp_receive_window(const struct tcp_sock *tp)
 663{
 664        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
 665
 666        if (win < 0)
 667                win = 0;
 668        return (u32) win;
 669}
 670
 671/* Choose a new window, without checks for shrinking, and without
 672 * scaling applied to the result.  The caller does these things
 673 * if necessary.  This is a "raw" window selection.
 674 */
 675u32 __tcp_select_window(struct sock *sk);
 676
 677void tcp_send_window_probe(struct sock *sk);
 678
  679/* TCP timestamps are only 32 bits, which causes a slight
 680 * complication on 64-bit systems since we store a snapshot
 681 * of jiffies in the buffer control blocks below.  We decided
 682 * to use only the low 32-bits of jiffies and hide the ugly
 683 * casts with the following macro.
 684 */
 685#define tcp_time_stamp          ((__u32)(jiffies))
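
/* Illustrative usage: elapsed time is computed with plain unsigned
 * subtraction, which stays correct across the 32-bit wrap, e.g.
 *
 *	u32 delta = tcp_time_stamp - tp->rcv_tstamp;
 *
 * delta is in jiffies; keepalive_time_elapsed() further down in this header
 * is a real user of this pattern.
 */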
 686
 687#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 688
 689#define TCPHDR_FIN 0x01
 690#define TCPHDR_SYN 0x02
 691#define TCPHDR_RST 0x04
 692#define TCPHDR_PSH 0x08
 693#define TCPHDR_ACK 0x10
 694#define TCPHDR_URG 0x20
 695#define TCPHDR_ECE 0x40
 696#define TCPHDR_CWR 0x80
 697
 698/* This is what the send packet queuing engine uses to pass
 699 * TCP per-packet control information to the transmission code.
 700 * We also store the host-order sequence numbers in here too.
 701 * This is 44 bytes if IPV6 is enabled.
 702 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 703 */
 704struct tcp_skb_cb {
 705        union {
 706                struct inet_skb_parm    h4;
 707#if IS_ENABLED(CONFIG_IPV6)
 708                struct inet6_skb_parm   h6;
 709#endif
 710        } header;       /* For incoming frames          */
 711        __u32           seq;            /* Starting sequence number     */
 712        __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
 713        __u32           when;           /* used to compute rtt's        */
 714        __u8            tcp_flags;      /* TCP header flags. (tcp[13])  */
 715
 716        __u8            sacked;         /* State flags for SACK/FACK.   */
 717#define TCPCB_SACKED_ACKED      0x01    /* SKB ACK'd by a SACK block    */
 718#define TCPCB_SACKED_RETRANS    0x02    /* SKB retransmitted            */
 719#define TCPCB_LOST              0x04    /* SKB is lost                  */
 720#define TCPCB_TAGBITS           0x07    /* All tag bits                 */
 721#define TCPCB_EVER_RETRANS      0x80    /* Ever retransmitted frame     */
 722#define TCPCB_RETRANS           (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 723
 724        __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
 725        /* 1 byte hole */
 726        __u32           ack_seq;        /* Sequence number ACK'd        */
 727};
 728
 729#define TCP_SKB_CB(__skb)       ((struct tcp_skb_cb *)&((__skb)->cb[0]))
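
/* Illustrative usage (made-up snippet): the control block carries per-packet
 * TCP state so the code paths never have to re-parse the header, e.g.
 *
 *	struct sk_buff *skb = tcp_write_queue_head(sk);
 *	u32 start = TCP_SKB_CB(skb)->seq;
 *	u32 end   = TCP_SKB_CB(skb)->end_seq;
 *
 * (tcp_write_queue_head() is defined later in this header; end_seq covers
 * SEQ + SYN + FIN + datalen as noted in the struct above.)
 */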
 730
 731/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 732 *
 733 * If we receive a SYN packet with these bits set, it means a network is
 734 * playing bad games with TOS bits. In order to avoid possible false congestion
  735 * notifications, we disable TCP ECN negotiation.
 736 */
 737static inline void
 738TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
 739                struct net *net)
 740{
 741        const struct tcphdr *th = tcp_hdr(skb);
 742
 743        if (net->ipv4.sysctl_tcp_ecn && th->ece && th->cwr &&
 744            INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
 745                inet_rsk(req)->ecn_ok = 1;
 746}
 747
 748/* Due to TSO, an SKB can be composed of multiple actual
 749 * packets.  To keep these tracked properly, we use this.
 750 */
 751static inline int tcp_skb_pcount(const struct sk_buff *skb)
 752{
 753        return skb_shinfo(skb)->gso_segs;
 754}
 755
 756/* This is valid iff tcp_skb_pcount() > 1. */
 757static inline int tcp_skb_mss(const struct sk_buff *skb)
 758{
 759        return skb_shinfo(skb)->gso_size;
 760}
 761
 762/* Events passed to congestion control interface */
 763enum tcp_ca_event {
 764        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
 765        CA_EVENT_CWND_RESTART,  /* congestion window restart */
 766        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
 767        CA_EVENT_LOSS,          /* loss timeout */
 768        CA_EVENT_FAST_ACK,      /* in sequence ack */
 769        CA_EVENT_SLOW_ACK,      /* other ack */
 770};
 771
 772/*
 773 * Interface for adding new TCP congestion control handlers
 774 */
 775#define TCP_CA_NAME_MAX 16
 776#define TCP_CA_MAX      128
 777#define TCP_CA_BUF_MAX  (TCP_CA_NAME_MAX*TCP_CA_MAX)
 778
 779#define TCP_CONG_NON_RESTRICTED 0x1
 780
 781struct tcp_congestion_ops {
 782        struct list_head        list;
 783        unsigned long flags;
 784
 785        /* initialize private data (optional) */
 786        void (*init)(struct sock *sk);
 787        /* cleanup private data  (optional) */
 788        void (*release)(struct sock *sk);
 789
 790        /* return slow start threshold (required) */
 791        u32 (*ssthresh)(struct sock *sk);
 792        /* do new cwnd calculation (required) */
 793        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
 794        /* call before changing ca_state (optional) */
 795        void (*set_state)(struct sock *sk, u8 new_state);
 796        /* call when cwnd event occurs (optional) */
 797        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
 798        /* new value of cwnd after loss (optional) */
 799        u32  (*undo_cwnd)(struct sock *sk);
 800        /* hook for packet ack accounting (optional) */
 801        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
 802        /* get info for inet_diag (optional) */
 803        void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
 804
 805        char            name[TCP_CA_NAME_MAX];
 806        struct module   *owner;
 807};
 808
 809int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 810void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 811
 812void tcp_init_congestion_control(struct sock *sk);
 813void tcp_cleanup_congestion_control(struct sock *sk);
 814int tcp_set_default_congestion_control(const char *name);
 815void tcp_get_default_congestion_control(char *name);
 816void tcp_get_available_congestion_control(char *buf, size_t len);
 817void tcp_get_allowed_congestion_control(char *buf, size_t len);
 818int tcp_set_allowed_congestion_control(char *allowed);
 819int tcp_set_congestion_control(struct sock *sk, const char *name);
 820int tcp_slow_start(struct tcp_sock *tp, u32 acked);
 821void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 822
 823extern struct tcp_congestion_ops tcp_init_congestion_ops;
 824u32 tcp_reno_ssthresh(struct sock *sk);
 825void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 826extern struct tcp_congestion_ops tcp_reno;
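
/* Illustrative sketch (not a real in-tree module): a minimal congestion
 * control that reuses the Reno helpers declared above.  Only ssthresh and
 * cong_avoid are required; all other hooks are optional.  The name
 * "example_reno" is made up for this sketch.
 *
 *	static struct tcp_congestion_ops example_reno __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.name		= "example_reno",
 *		.owner		= THIS_MODULE,
 *	};
 *
 * A module would then call tcp_register_congestion_control(&example_reno)
 * from its init function and tcp_unregister_congestion_control() on exit.
 */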
 827
 828static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
 829{
 830        struct inet_connection_sock *icsk = inet_csk(sk);
 831
 832        if (icsk->icsk_ca_ops->set_state)
 833                icsk->icsk_ca_ops->set_state(sk, ca_state);
 834        icsk->icsk_ca_state = ca_state;
 835}
 836
 837static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 838{
 839        const struct inet_connection_sock *icsk = inet_csk(sk);
 840
 841        if (icsk->icsk_ca_ops->cwnd_event)
 842                icsk->icsk_ca_ops->cwnd_event(sk, event);
 843}
 844
  845/* These functions determine how the current flow behaves with respect to SACK
 846 * handling. SACK is negotiated with the peer, and therefore it can vary
 847 * between different flows.
 848 *
 849 * tcp_is_sack - SACK enabled
 850 * tcp_is_reno - No SACK
 851 * tcp_is_fack - FACK enabled, implies SACK enabled
 852 */
 853static inline int tcp_is_sack(const struct tcp_sock *tp)
 854{
 855        return tp->rx_opt.sack_ok;
 856}
 857
 858static inline bool tcp_is_reno(const struct tcp_sock *tp)
 859{
 860        return !tcp_is_sack(tp);
 861}
 862
 863static inline bool tcp_is_fack(const struct tcp_sock *tp)
 864{
 865        return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
 866}
 867
 868static inline void tcp_enable_fack(struct tcp_sock *tp)
 869{
 870        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 871}
 872
 873/* TCP early-retransmit (ER) is similar to but more conservative than
 874 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 875 */
 876static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
 877{
 878        tp->do_early_retrans = sysctl_tcp_early_retrans &&
 879                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
 880                sysctl_tcp_reordering == 3;
 881}
 882
 883static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
 884{
 885        tp->do_early_retrans = 0;
 886}
 887
 888static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 889{
 890        return tp->sacked_out + tp->lost_out;
 891}
 892
 893/* This determines how many packets are "in the network" to the best
 894 * of our knowledge.  In many cases it is conservative, but where
 895 * detailed information is available from the receiver (via SACK
 896 * blocks etc.) we can make more aggressive calculations.
 897 *
 898 * Use this for decisions involving congestion control, use just
 899 * tp->packets_out to determine if the send queue is empty or not.
 900 *
 901 * Read this equation as:
 902 *
 903 *      "Packets sent once on transmission queue" MINUS
 904 *      "Packets left network, but not honestly ACKed yet" PLUS
 905 *      "Packets fast retransmitted"
 906 */
 907static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 908{
 909        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 910}
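
/* Illustrative arithmetic (made-up counters): with packets_out == 10,
 * sacked_out == 2, lost_out == 1 and retrans_out == 1, tcp_left_out() is 3
 * and tcp_packets_in_flight() is 10 - 3 + 1 == 8 packets still assumed to
 * be in the network.
 */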
 911
 912#define TCP_INFINITE_SSTHRESH   0x7fffffff
 913
 914static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 915{
 916        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
 917}
 918
 919static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
 920{
 921        return (TCPF_CA_CWR | TCPF_CA_Recovery) &
 922               (1 << inet_csk(sk)->icsk_ca_state);
 923}
 924
 925/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 926 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 927 * ssthresh.
 928 */
 929static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 930{
 931        const struct tcp_sock *tp = tcp_sk(sk);
 932
 933        if (tcp_in_cwnd_reduction(sk))
 934                return tp->snd_ssthresh;
 935        else
 936                return max(tp->snd_ssthresh,
 937                           ((tp->snd_cwnd >> 1) +
 938                            (tp->snd_cwnd >> 2)));
 939}
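
/* Illustrative arithmetic: (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of cwnd,
 * so with snd_cwnd == 40 and snd_ssthresh == 20 this returns
 * max(20, 30) == 30 outside of the cwnd reduction phase.
 */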
 940
 941/* Use define here intentionally to get WARN_ON location shown at the caller */
 942#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
 943
 944void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 945__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 946
 947/* The maximum number of MSS of available cwnd for which TSO defers
 948 * sending if not using sysctl_tcp_tso_win_divisor.
 949 */
 950static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
 951{
 952        return 3;
 953}
 954
 955/* Slow start with delack produces 3 packets of burst, so that
 956 * it is safe "de facto".  This will be the default - same as
 957 * the default reordering threshold - but if reordering increases,
 958 * we must be able to allow cwnd to burst at least this much in order
 959 * to not pull it back when holes are filled.
 960 */
 961static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
 962{
 963        return tp->reordering;
 964}
 965
 966/* Returns end sequence number of the receiver's advertised window */
 967static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 968{
 969        return tp->snd_una + tp->snd_wnd;
 970}
 971
 972/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 973 * flexible approach. The RFC suggests cwnd should not be raised unless
 974 * it was fully used previously. And that's exactly what we do in
 975 * congestion avoidance mode. But in slow start we allow cwnd to grow
 976 * as long as the application has used half the cwnd.
 977 * Example :
 978 *    cwnd is 10 (IW10), but application sends 9 frames.
 979 *    We allow cwnd to reach 18 when all frames are ACKed.
 980 * This check is safe because it's as aggressive as slow start which already
  981 * risks 100% overshoot. The advantage is that we discourage applications
  982 * from sending filler packets or data to artificially blow up cwnd usage,
  983 * and allow an application-limited process to probe bandwidth more aggressively.
 984 */
 985static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 986{
 987        const struct tcp_sock *tp = tcp_sk(sk);
 988
 989        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
 990        if (tp->snd_cwnd <= tp->snd_ssthresh)
 991                return tp->snd_cwnd < 2 * tp->max_packets_out;
 992
 993        return tp->is_cwnd_limited;
 994}
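
/* Illustrative example (made-up values): in slow start with snd_cwnd == 10
 * and max_packets_out == 9 (the application sent 9 segments), 10 < 2 * 9 so
 * the flow counts as cwnd-limited and cwnd may keep growing, up to 18 once
 * all 9 segments are ACKed, as described in the comment above.
 */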
 995
 996static inline void tcp_check_probe_timer(struct sock *sk)
 997{
 998        const struct tcp_sock *tp = tcp_sk(sk);
 999        const struct inet_connection_sock *icsk = inet_csk(sk);
1000
1001        if (!tp->packets_out && !icsk->icsk_pending)
1002                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1003                                          icsk->icsk_rto, TCP_RTO_MAX);
1004}
1005
1006static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1007{
1008        tp->snd_wl1 = seq;
1009}
1010
1011static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1012{
1013        tp->snd_wl1 = seq;
1014}
1015
1016/*
1017 * Calculate(/check) TCP checksum
1018 */
1019static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1020                                   __be32 daddr, __wsum base)
1021{
1022        return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1023}
1024
1025static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
1026{
1027        return __skb_checksum_complete(skb);
1028}
1029
1030static inline bool tcp_checksum_complete(struct sk_buff *skb)
1031{
1032        return !skb_csum_unnecessary(skb) &&
1033                __tcp_checksum_complete(skb);
1034}
1035
1036/* Prequeue for VJ style copy to user, combined with checksumming. */
1037
1038static inline void tcp_prequeue_init(struct tcp_sock *tp)
1039{
1040        tp->ucopy.task = NULL;
1041        tp->ucopy.len = 0;
1042        tp->ucopy.memory = 0;
1043        skb_queue_head_init(&tp->ucopy.prequeue);
1044#ifdef CONFIG_NET_DMA
1045        tp->ucopy.dma_chan = NULL;
1046        tp->ucopy.wakeup = 0;
1047        tp->ucopy.pinned_list = NULL;
1048        tp->ucopy.dma_cookie = 0;
1049#endif
1050}
1051
1052bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1053
1054#undef STATE_TRACE
1055
1056#ifdef STATE_TRACE
1057static const char *statename[]={
1058        "Unused","Established","Syn Sent","Syn Recv",
1059        "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1060        "Close Wait","Last ACK","Listen","Closing"
1061};
1062#endif
1063void tcp_set_state(struct sock *sk, int state);
1064
1065void tcp_done(struct sock *sk);
1066
1067static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1068{
1069        rx_opt->dsack = 0;
1070        rx_opt->num_sacks = 0;
1071}
1072
1073u32 tcp_default_init_rwnd(u32 mss);
1074
1075/* Determine a window scaling and initial window to offer. */
1076void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
1077                               __u32 *window_clamp, int wscale_ok,
1078                               __u8 *rcv_wscale, __u32 init_rcv_wnd);
1079
1080static inline int tcp_win_from_space(int space)
1081{
1082        return sysctl_tcp_adv_win_scale<=0 ?
1083                (space>>(-sysctl_tcp_adv_win_scale)) :
1084                space - (space>>sysctl_tcp_adv_win_scale);
1085}
1086
1087/* Note: caller must be prepared to deal with negative returns */ 
1088static inline int tcp_space(const struct sock *sk)
1089{
1090        return tcp_win_from_space(sk->sk_rcvbuf -
1091                                  atomic_read(&sk->sk_rmem_alloc));
1092} 
1093
1094static inline int tcp_full_space(const struct sock *sk)
1095{
1096        return tcp_win_from_space(sk->sk_rcvbuf); 
1097}
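
/* Illustrative arithmetic (assuming sysctl_tcp_adv_win_scale == 2): with
 * 65536 bytes of free receive buffer, tcp_win_from_space(65536) returns
 * 65536 - (65536 >> 2) == 49152 bytes of advertisable window; the remaining
 * quarter is kept back as application and metadata overhead.
 */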
1098
1099static inline void tcp_openreq_init(struct request_sock *req,
1100                                    struct tcp_options_received *rx_opt,
1101                                    struct sk_buff *skb)
1102{
1103        struct inet_request_sock *ireq = inet_rsk(req);
1104
1105        req->rcv_wnd = 0;               /* So that tcp_send_synack() knows! */
1106        req->cookie_ts = 0;
1107        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
1108        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
1109        tcp_rsk(req)->snt_synack = 0;
1110        req->mss = rx_opt->mss_clamp;
1111        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
1112        ireq->tstamp_ok = rx_opt->tstamp_ok;
1113        ireq->sack_ok = rx_opt->sack_ok;
1114        ireq->snd_wscale = rx_opt->snd_wscale;
1115        ireq->wscale_ok = rx_opt->wscale_ok;
1116        ireq->acked = 0;
1117        ireq->ecn_ok = 0;
1118        ireq->ir_rmt_port = tcp_hdr(skb)->source;
1119        ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
1120}
1121
1122extern void tcp_openreq_init_rwin(struct request_sock *req,
1123                                  struct sock *sk, struct dst_entry *dst);
1124
1125void tcp_enter_memory_pressure(struct sock *sk);
1126
1127static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1128{
1129        return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
1130}
1131
1132static inline int keepalive_time_when(const struct tcp_sock *tp)
1133{
1134        return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
1135}
1136
1137static inline int keepalive_probes(const struct tcp_sock *tp)
1138{
1139        return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
1140}
1141
1142static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1143{
1144        const struct inet_connection_sock *icsk = &tp->inet_conn;
1145
1146        return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
1147                          tcp_time_stamp - tp->rcv_tstamp);
1148}
1149
1150static inline int tcp_fin_time(const struct sock *sk)
1151{
1152        int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
1153        const int rto = inet_csk(sk)->icsk_rto;
1154
1155        if (fin_timeout < (rto << 2) - (rto >> 1))
1156                fin_timeout = (rto << 2) - (rto >> 1);
1157
1158        return fin_timeout;
1159}
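
/* Illustrative arithmetic: (rto << 2) - (rto >> 1) is 3.5 * RTO, so with an
 * RTO of 200 ms the effective FIN-WAIT-2 timeout never drops below 700 ms,
 * even if tcp_fin_timeout or TCP_LINGER2 is configured lower.
 */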
1160
1161static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1162                                  int paws_win)
1163{
1164        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1165                return true;
1166        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1167                return true;
1168        /*
 1169         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
 1170         * while subsequent tcp messages carry valid values. Ignore a 0 value,
 1171         * or else a 'negative' tsval might prevent us from accepting their packets.
1172         */
1173        if (!rx_opt->ts_recent)
1174                return true;
1175        return false;
1176}
1177
1178static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1179                                   int rst)
1180{
1181        if (tcp_paws_check(rx_opt, 0))
1182                return false;
1183
 1184        /* RST segments are not recommended to carry timestamps,
 1185           and, if they do, it is recommended to ignore PAWS because
 1186           "their cleanup function should take precedence over timestamps."
 1187           That is certainly a mistake. One needs to understand the reasons
 1188           behind this constraint before relaxing it: if the peer reboots, its
 1189           clock may go out of sync and half-open connections will not be reset.
 1190           Actually, the problem would not exist at all if every
 1191           implementation followed the draft about maintaining clocks
 1192           across reboots. Linux-2.2 DOES NOT!
 1193
 1194           However, we can relax the time bounds for RST segments to MSL.
1195         */
1196        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1197                return false;
1198        return true;
1199}
1200
1201static inline void tcp_mib_init(struct net *net)
1202{
1203        /* See RFC 2012 */
1204        TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
1205        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1206        TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1207        TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
1208}
1209
1210/* from STCP */
1211static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1212{
1213        tp->lost_skb_hint = NULL;
1214}
1215
1216static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1217{
1218        tcp_clear_retrans_hints_partial(tp);
1219        tp->retransmit_skb_hint = NULL;
1220}
1221
1222/* MD5 Signature */
1223struct crypto_hash;
1224
1225union tcp_md5_addr {
1226        struct in_addr  a4;
1227#if IS_ENABLED(CONFIG_IPV6)
1228        struct in6_addr a6;
1229#endif
1230};
1231
1232/* - key database */
1233struct tcp_md5sig_key {
1234        struct hlist_node       node;
1235        u8                      keylen;
1236        u8                      family; /* AF_INET or AF_INET6 */
1237        union tcp_md5_addr      addr;
1238        u8                      key[TCP_MD5SIG_MAXKEYLEN];
1239        struct rcu_head         rcu;
1240};
1241
1242/* - sock block */
1243struct tcp_md5sig_info {
1244        struct hlist_head       head;
1245        struct rcu_head         rcu;
1246};
1247
1248/* - pseudo header */
1249struct tcp4_pseudohdr {
1250        __be32          saddr;
1251        __be32          daddr;
1252        __u8            pad;
1253        __u8            protocol;
1254        __be16          len;
1255};
1256
1257struct tcp6_pseudohdr {
1258        struct in6_addr saddr;
1259        struct in6_addr daddr;
1260        __be32          len;
1261        __be32          protocol;       /* including padding */
1262};
1263
1264union tcp_md5sum_block {
1265        struct tcp4_pseudohdr ip4;
1266#if IS_ENABLED(CONFIG_IPV6)
1267        struct tcp6_pseudohdr ip6;
1268#endif
1269};
1270
1271/* - pool: digest algorithm, hash description and scratch buffer */
1272struct tcp_md5sig_pool {
1273        struct hash_desc        md5_desc;
1274        union tcp_md5sum_block  md5_blk;
1275};
1276
1277/* - functions */
1278int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1279                        const struct sock *sk, const struct request_sock *req,
1280                        const struct sk_buff *skb);
1281int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1282                   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1283int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1284                   int family);
1285struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
1286                                         struct sock *addr_sk);
1287
1288#ifdef CONFIG_TCP_MD5SIG
1289struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1290                                         const union tcp_md5_addr *addr,
1291                                         int family);
1292#define tcp_twsk_md5_key(twsk)  ((twsk)->tw_md5_key)
1293#else
1294static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
1295                                         const union tcp_md5_addr *addr,
1296                                         int family)
1297{
1298        return NULL;
1299}
1300#define tcp_twsk_md5_key(twsk)  NULL
1301#endif
1302
1303bool tcp_alloc_md5sig_pool(void);
1304
1305struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1306static inline void tcp_put_md5sig_pool(void)
1307{
1308        local_bh_enable();
1309}
1310
1311int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1312int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1313                          unsigned int header_len);
1314int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1315                     const struct tcp_md5sig_key *key);
1316
1317/* From tcp_fastopen.c */
1318void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1319                            struct tcp_fastopen_cookie *cookie, int *syn_loss,
1320                            unsigned long *last_syn_loss);
1321void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1322                            struct tcp_fastopen_cookie *cookie, bool syn_lost);
1323struct tcp_fastopen_request {
1324        /* Fast Open cookie. Size 0 means a cookie request */
1325        struct tcp_fastopen_cookie      cookie;
1326        struct msghdr                   *data;  /* data in MSG_FASTOPEN */
1327        size_t                          size;
1328        int                             copied; /* queued in tcp_connect() */
1329};
1330void tcp_free_fastopen_req(struct tcp_sock *tp);
1331
1332extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1333int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1334bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1335                      struct request_sock *req,
1336                      struct tcp_fastopen_cookie *foc,
1337                      struct dst_entry *dst);
1338void tcp_fastopen_init_key_once(bool publish);
1339#define TCP_FASTOPEN_KEY_LENGTH 16
1340
1341/* Fastopen key context */
1342struct tcp_fastopen_context {
1343        struct crypto_cipher    *tfm;
1344        __u8                    key[TCP_FASTOPEN_KEY_LENGTH];
1345        struct rcu_head         rcu;
1346};
1347
1348/* write queue abstraction */
1349static inline void tcp_write_queue_purge(struct sock *sk)
1350{
1351        struct sk_buff *skb;
1352
1353        while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1354                sk_wmem_free_skb(sk, skb);
1355        sk_mem_reclaim(sk);
1356        tcp_clear_all_retrans_hints(tcp_sk(sk));
1357}
1358
1359static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1360{
1361        return skb_peek(&sk->sk_write_queue);
1362}
1363
1364static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1365{
1366        return skb_peek_tail(&sk->sk_write_queue);
1367}
1368
1369static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1370                                                   const struct sk_buff *skb)
1371{
1372        return skb_queue_next(&sk->sk_write_queue, skb);
1373}
1374
1375static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1376                                                   const struct sk_buff *skb)
1377{
1378        return skb_queue_prev(&sk->sk_write_queue, skb);
1379}
1380
1381#define tcp_for_write_queue(skb, sk)                                    \
1382        skb_queue_walk(&(sk)->sk_write_queue, skb)
1383
1384#define tcp_for_write_queue_from(skb, sk)                               \
1385        skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1386
1387#define tcp_for_write_queue_from_safe(skb, tmp, sk)                     \
1388        skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1389
1390static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1391{
1392        return sk->sk_send_head;
1393}
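
/* Illustrative usage (made-up snippet): walking only the already-sent part of
 * the write queue and skipping segments the receiver has already SACKed, a
 * pattern similar to what the retransmit code does:
 *
 *	struct sk_buff *skb;
 *
 *	tcp_for_write_queue(skb, sk) {
 *		if (skb == tcp_send_head(sk))
 *			break;
 *		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 *			continue;
 *		handle_candidate(skb);
 *	}
 *
 * handle_candidate() is a hypothetical stand-in for whatever the caller wants
 * to do with each already-sent, not-yet-SACKed segment.
 */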
1394
1395static inline bool tcp_skb_is_last(const struct sock *sk,
1396                                   const struct sk_buff *skb)
1397{
1398        return skb_queue_is_last(&sk->sk_write_queue, skb);
1399}
1400
1401static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1402{
1403        if (tcp_skb_is_last(sk, skb))
1404                sk->sk_send_head = NULL;
1405        else
1406                sk->sk_send_head = tcp_write_queue_next(sk, skb);
1407}
1408
1409static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1410{
1411        if (sk->sk_send_head == skb_unlinked)
1412                sk->sk_send_head = NULL;
1413}
1414
1415static inline void tcp_init_send_head(struct sock *sk)
1416{
1417        sk->sk_send_head = NULL;
1418}
1419
1420static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1421{
1422        __skb_queue_tail(&sk->sk_write_queue, skb);
1423}
1424
1425static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1426{
1427        __tcp_add_write_queue_tail(sk, skb);
1428
1429        /* Queue it, remembering where we must start sending. */
1430        if (sk->sk_send_head == NULL) {
1431                sk->sk_send_head = skb;
1432
1433                if (tcp_sk(sk)->highest_sack == NULL)
1434                        tcp_sk(sk)->highest_sack = skb;
1435        }
1436}
1437
1438static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1439{
1440        __skb_queue_head(&sk->sk_write_queue, skb);
1441}
1442
1443/* Insert buff after skb on the write queue of sk.  */
1444static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1445                                                struct sk_buff *buff,
1446                                                struct sock *sk)
1447{
1448        __skb_queue_after(&sk->sk_write_queue, skb, buff);
1449}
1450
1451/* Insert new before skb on the write queue of sk.  */
1452static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1453                                                  struct sk_buff *skb,
1454                                                  struct sock *sk)
1455{
1456        __skb_queue_before(&sk->sk_write_queue, skb, new);
1457
1458        if (sk->sk_send_head == skb)
1459                sk->sk_send_head = new;
1460}
1461
1462static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1463{
1464        __skb_unlink(skb, &sk->sk_write_queue);
1465}
1466
1467static inline bool tcp_write_queue_empty(struct sock *sk)
1468{
1469        return skb_queue_empty(&sk->sk_write_queue);
1470}
1471
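/*
 * Transmit whatever is still waiting at the send head, re-evaluating the
 * current MSS and honouring the socket's Nagle setting.
 */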
1472static inline void tcp_push_pending_frames(struct sock *sk)
1473{
1474        if (tcp_send_head(sk)) {
1475                struct tcp_sock *tp = tcp_sk(sk);
1476
1477                __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1478        }
1479}
1480
1481/* Start sequence of the skb just after the highest skb with SACKed
1482 * bit, valid only if sacked_out > 0 or when the caller has ensured
1483 * validity by itself.
1484 */
1485static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1486{
1487        if (!tp->sacked_out)
1488                return tp->snd_una;
1489
1490        if (tp->highest_sack == NULL)
1491                return tp->snd_nxt;
1492
1493        return TCP_SKB_CB(tp->highest_sack)->seq;
1494}
1495
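/*
 * tp->highest_sack caches the skb just after the highest SACKed one so
 * that SACK processing need not rescan the write queue; the helpers below
 * keep it consistent as that skb is sent, combined or the queue is reset.
 */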
1496static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1497{
1498        tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1499                                                tcp_write_queue_next(sk, skb);
1500}
1501
1502static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1503{
1504        return tcp_sk(sk)->highest_sack;
1505}
1506
1507static inline void tcp_highest_sack_reset(struct sock *sk)
1508{
1509        tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1510}
1511
1512/* Called when old skb is about to be deleted (to be combined with new skb) */
1513static inline void tcp_highest_sack_combine(struct sock *sk,
1514                                            struct sk_buff *old,
1515                                            struct sk_buff *new)
1516{
1517        if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1518                tcp_sk(sk)->highest_sack = new;
1519}
1520
1521/* Determines whether this is a thin stream (which may suffer from
1522 * increased latency). Used to trigger latency-reducing mechanisms.
1523 */
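/*
 * With fewer than four packets in flight, the three duplicate ACKs needed
 * for fast retransmit may never arrive, so without special handling such
 * flows recover from loss only after an RTO.
 */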
1524static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1525{
1526        return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1527}
1528
1529/* /proc */
1530enum tcp_seq_states {
1531        TCP_SEQ_STATE_LISTENING,
1532        TCP_SEQ_STATE_OPENREQ,
1533        TCP_SEQ_STATE_ESTABLISHED,
1534};
1535
1536int tcp_seq_open(struct inode *inode, struct file *file);
1537
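/*
 * Per-address-family descriptor for the /proc/net/tcp and /proc/net/tcp6
 * seq_file interface; tcp_iter_state below is the per-open iterator that
 * remembers which hash bucket and offset the dump has reached.
 */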
1538struct tcp_seq_afinfo {
1539        char                            *name;
1540        sa_family_t                     family;
1541        const struct file_operations    *seq_fops;
1542        struct seq_operations           seq_ops;
1543};
1544
1545struct tcp_iter_state {
1546        struct seq_net_private  p;
1547        sa_family_t             family;
1548        enum tcp_seq_states     state;
1549        struct sock             *syn_wait_sk;
1550        int                     bucket, offset, sbucket, num;
1551        kuid_t                  uid;
1552        loff_t                  last_pos;
1553};
1554
1555int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1556void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1557
1558extern struct request_sock_ops tcp_request_sock_ops;
1559extern struct request_sock_ops tcp6_request_sock_ops;
1560
1561void tcp_v4_destroy_sock(struct sock *sk);
1562
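/*
 * Offload hooks: tcp_gso_segment() splits an oversized skb into MSS-sized
 * segments on transmit, while tcp_gro_receive() and tcp_gro_complete()
 * coalesce matching segments on receive.
 */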
1563struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1564                                netdev_features_t features);
1565struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1566int tcp_gro_complete(struct sk_buff *skb);
1567
1568void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1569
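/*
 * TCP_NOTSENT_LOWAT: limit how much data may sit in the write queue
 * without having been sent.  The per-socket value takes precedence over
 * the sysctl, and tcp_stream_memory_free() reports whether the unsent
 * backlog (write_seq - snd_nxt) is still below that limit, which gates
 * write-space wakeups.
 */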
1570static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1571{
1572        return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
1573}
1574
1575static inline bool tcp_stream_memory_free(const struct sock *sk)
1576{
1577        const struct tcp_sock *tp = tcp_sk(sk);
1578        u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1579
1580        return notsent_bytes < tcp_notsent_lowat(tp);
1581}
1582
1583#ifdef CONFIG_PROC_FS
1584int tcp4_proc_init(void);
1585void tcp4_proc_exit(void);
1586#endif
1587
1588/* TCP af-specific functions */
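/*
 * Per-family (IPv4/IPv6) operation tables for TCP-MD5 signatures
 * (RFC 2385): look up the peer's key, compute the MD5 option over the
 * segment and pseudo-header, and parse the TCP_MD5SIG socket option.
 */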
1589struct tcp_sock_af_ops {
1590#ifdef CONFIG_TCP_MD5SIG
1591        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
1592                                                struct sock *addr_sk);
1593        int                     (*calc_md5_hash) (char *location,
1594                                                  struct tcp_md5sig_key *md5,
1595                                                  const struct sock *sk,
1596                                                  const struct request_sock *req,
1597                                                  const struct sk_buff *skb);
1598        int                     (*md5_parse) (struct sock *sk,
1599                                              char __user *optval,
1600                                              int optlen);
1601#endif
1602};
1603
1604struct tcp_request_sock_ops {
1605#ifdef CONFIG_TCP_MD5SIG
1606        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
1607                                                struct request_sock *req);
1608        int                     (*calc_md5_hash) (char *location,
1609                                                  struct tcp_md5sig_key *md5,
1610                                                  const struct sock *sk,
1611                                                  const struct request_sock *req,
1612                                                  const struct sk_buff *skb);
1613#endif
1614};
1615
1616int tcpv4_offload_init(void);
1617
1618void tcp_v4_init(void);
1619void tcp_init(void);
1620
1621#endif  /* _TCP_H */
1622