14#ifndef _TCP_H
15#define _TCP_H
16
17#define FASTRETRANS_DEBUG 1
18
19#include <linux/list.h>
20#include <linux/tcp.h>
21#include <linux/bug.h>
22#include <linux/slab.h>
23#include <linux/cache.h>
24#include <linux/percpu.h>
25#include <linux/skbuff.h>
26#include <linux/kref.h>
27#include <linux/ktime.h>
28#include <linux/indirect_call_wrapper.h>
29
30#include <net/inet_connection_sock.h>
31#include <net/inet_timewait_sock.h>
32#include <net/inet_hashtables.h>
33#include <net/checksum.h>
34#include <net/request_sock.h>
35#include <net/sock_reuseport.h>
36#include <net/sock.h>
37#include <net/snmp.h>
38#include <net/ip.h>
39#include <net/tcp_states.h>
40#include <net/tcp_ao.h>
41#include <net/inet_ecn.h>
42#include <net/dst.h>
43#include <net/mptcp.h>
44
45#include <linux/seq_file.h>
46#include <linux/memcontrol.h>
47#include <linux/bpf-cgroup.h>
48#include <linux/siphash.h>
49
50extern struct inet_hashinfo tcp_hashinfo;
51
52DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
53int tcp_orphan_count_sum(void);
54
55void tcp_time_wait(struct sock *sk, int state, int timeo);
56
57#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
58#define MAX_TCP_OPTION_SPACE 40
59#define TCP_MIN_SND_MSS 48
60#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
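/*
 * Illustrative arithmetic: the TCP data offset is a 4-bit count of 32-bit
 * words, so the largest possible header is 15 * 4 = 60 bytes.  20 bytes of
 * that are the fixed header, which leaves MAX_TCP_OPTION_SPACE = 40 bytes
 * for options.  With TCP_MIN_SND_MSS = 48, at least 48 - 40 = 8 bytes
 * (TCP_MIN_GSO_SIZE) of payload fit in every segment even if a peer
 * advertises a pathologically small MSS.
 */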
61
62
63
64
65
66#define MAX_TCP_WINDOW 32767U
67
68
69#define TCP_MIN_MSS 88U
70
71
72#define TCP_BASE_MSS 1024
73
74
75#define TCP_PROBE_INTERVAL 600
76
77
78#define TCP_PROBE_THRESHOLD 8
79
80
81#define TCP_FASTRETRANS_THRESH 3
82
83
84#define TCP_MAX_QUICKACKS 16U
85
86
87#define TCP_MAX_WSCALE 14U
88
89
90#define TCP_URG_VALID 0x0100
91#define TCP_URG_NOTYET 0x0200
92#define TCP_URG_READ 0x0400
93
94#define TCP_RETR1 3
95
96
97
98
99
100
101#define TCP_RETR2 15
102
103
104
105
106
107
108#define TCP_SYN_RETRIES 6
109
110
111
112
113
114
115
116
117#define TCP_SYNACK_RETRIES 5
118
119
120
121
122
123
124#define TCP_TIMEWAIT_LEN (60*HZ)
125
126#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
127
128
129
130
131
132#define TCP_FIN_TIMEOUT_MAX (120 * HZ)
133
134#define TCP_DELACK_MAX ((unsigned)(HZ/5))
135static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
136
137#if HZ >= 100
138#define TCP_DELACK_MIN ((unsigned)(HZ/25))
139#define TCP_ATO_MIN ((unsigned)(HZ/25))
140#else
141#define TCP_DELACK_MIN 4U
142#define TCP_ATO_MIN 4U
143#endif
144#define TCP_RTO_MAX ((unsigned)(120*HZ))
145#define TCP_RTO_MIN ((unsigned)(HZ/5))
146#define TCP_TIMEOUT_MIN (2U)
147
148#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC)
149
150#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))
151#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))
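/*
 * Illustrative values, assuming HZ = 1000: TCP_RTO_MIN is 200 ms,
 * TCP_RTO_MAX is 120 s, TCP_DELACK_MIN/TCP_ATO_MIN are 40 ms, and
 * TCP_TIMEOUT_INIT is the 1 s initial RTO recommended by RFC 6298.
 * TCP_TIMEOUT_FALLBACK keeps the older 3 s initial value (RFC 1122 era)
 * as a fallback.
 */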
152
153
154
155
156
157
158#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U))
159
160
161#define TCP_KEEPALIVE_TIME (120*60*HZ)
162#define TCP_KEEPALIVE_PROBES 9
163#define TCP_KEEPALIVE_INTVL (75*HZ)
164
165#define MAX_TCP_KEEPIDLE 32767
166#define MAX_TCP_KEEPINTVL 32767
167#define MAX_TCP_KEEPCNT 127
168#define MAX_TCP_SYNCNT 127
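/*
 * Worked example with the defaults above: an idle connection sends its
 * first keepalive probe after TCP_KEEPALIVE_TIME (2 hours) and is declared
 * dead after at most TCP_KEEPALIVE_PROBES unanswered probes spaced
 * TCP_KEEPALIVE_INTVL apart, i.e. roughly 2 h + 9 * 75 s, a bit over
 * 2 hours 11 minutes of silence in total.
 */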
169
170
171
172
173
174#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
175
176#define TCP_PAWS_MSL 60
177
178
179
180
181
182#define TCP_PAWS_WINDOW 1
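/*
 * Illustrative arithmetic: TCP_PAWS_WRAP is INT_MAX / USEC_PER_SEC,
 * roughly 2147 seconds (about 36 minutes); tcp_paws_check() treats a
 * ts_recent_stamp older than that as too stale to be used for rejecting
 * segments.  TCP_PAWS_MSL (60 s) and TCP_PAWS_WINDOW (1) bound how long
 * and how far per-host timestamps are trusted for the PAWS/RST checks
 * further down in this file.
 */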
183
184
185
186
187
188
189
190#define TCPOPT_NOP 1
191#define TCPOPT_EOL 0
192#define TCPOPT_MSS 2
193#define TCPOPT_WINDOW 3
194#define TCPOPT_SACK_PERM 4
195#define TCPOPT_SACK 5
196#define TCPOPT_TIMESTAMP 8
197#define TCPOPT_MD5SIG 19
198#define TCPOPT_AO 29
199#define TCPOPT_MPTCP 30
200#define TCPOPT_FASTOPEN 34
201#define TCPOPT_EXP 254
202
203
204
205#define TCPOPT_FASTOPEN_MAGIC 0xF989
206#define TCPOPT_SMC_MAGIC 0xE2D4C3D9
207
208
209
210
211
212#define TCPOLEN_MSS 4
213#define TCPOLEN_WINDOW 3
214#define TCPOLEN_SACK_PERM 2
215#define TCPOLEN_TIMESTAMP 10
216#define TCPOLEN_MD5SIG 18
217#define TCPOLEN_FASTOPEN_BASE 2
218#define TCPOLEN_EXP_FASTOPEN_BASE 4
219#define TCPOLEN_EXP_SMC_BASE 6
220
221
222#define TCPOLEN_TSTAMP_ALIGNED 12
223#define TCPOLEN_WSCALE_ALIGNED 4
224#define TCPOLEN_SACKPERM_ALIGNED 4
225#define TCPOLEN_SACK_BASE 2
226#define TCPOLEN_SACK_BASE_ALIGNED 4
227#define TCPOLEN_SACK_PERBLOCK 8
228#define TCPOLEN_MD5SIG_ALIGNED 20
229#define TCPOLEN_MSS_ALIGNED 4
230#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
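/*
 * Worked layout example: a typical SYN carries MSS (4) + timestamps
 * (12, aligned) + window scale (4, aligned) + SACK permitted (4, aligned)
 * = 24 of the 40 available option bytes.  On an established connection
 * with timestamps, 12 + TCPOLEN_SACK_BASE_ALIGNED (4) +
 * 3 * TCPOLEN_SACK_PERBLOCK (8) = 40, which is why at most three SACK
 * blocks fit alongside timestamps.
 */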
231
232
233#define TCP_NAGLE_OFF 1
234#define TCP_NAGLE_CORK 2
235#define TCP_NAGLE_PUSH 4
236
237
238#define TCP_THIN_LINEAR_RETRIES 6
239
240
241#define TCP_INIT_CWND 10
242
243
244#define TFO_CLIENT_ENABLE 1
245#define TFO_SERVER_ENABLE 2
246#define TFO_CLIENT_NO_COOKIE 4
247
248
249#define TFO_SERVER_COOKIE_NOT_REQD 0x200
250
251
252
253
254#define TFO_SERVER_WO_SOCKOPT1 0x400
255
256
257
258extern int sysctl_tcp_max_orphans;
259extern long sysctl_tcp_mem[3];
260
261#define TCP_RACK_LOSS_DETECTION 0x1
262#define TCP_RACK_STATIC_REO_WND 0x2
263#define TCP_RACK_NO_DUPTHRESH 0x4
264
265extern atomic_long_t tcp_memory_allocated;
266DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
267
268extern struct percpu_counter tcp_sockets_allocated;
269extern unsigned long tcp_memory_pressure;
270
271
272static inline bool tcp_under_memory_pressure(const struct sock *sk)
273{
274 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
275 mem_cgroup_under_socket_pressure(sk->sk_memcg))
276 return true;
277
278 return READ_ONCE(tcp_memory_pressure);
279}
280
281
282
283
284
285static inline bool before(__u32 seq1, __u32 seq2)
286{
287 return (__s32)(seq1-seq2) < 0;
288}
289#define after(seq2, seq1) before(seq1, seq2)
290
291
292static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
293{
294 return seq3 - seq2 >= seq1 - seq2;
295}
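/*
 * Minimal sketch (the helper name is hypothetical, not part of the kernel
 * API): before()/after()/between() compare sequence numbers modulo 2^32,
 * so a value that recently wrapped still orders correctly against an
 * older one.
 */
static inline bool tcp_seq_wrap_example(void)
{
	/* 0x00000010 wrapped past 0xfffffff0 and is still "after" it. */
	return after(0x00000010u, 0xfffffff0u) &&
	       between(0xfffffff8u, 0xfffffff0u, 0x00000010u);
}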
296
297static inline bool tcp_out_of_memory(struct sock *sk)
298{
299 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
300 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
301 return true;
302 return false;
303}
304
305static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
306{
307 sk_wmem_queued_add(sk, -skb->truesize);
308 if (!skb_zcopy_pure(skb))
309 sk_mem_uncharge(sk, skb->truesize);
310 else
311 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
312 __kfree_skb(skb);
313}
314
315void sk_forced_mem_schedule(struct sock *sk, int size);
316
317bool tcp_check_oom(struct sock *sk, int shift);
318
319
320extern struct proto tcp_prot;
321
322#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
323#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
324#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
325#define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
326
327void tcp_tasklet_init(void);
328
329int tcp_v4_err(struct sk_buff *skb, u32);
330
331void tcp_shutdown(struct sock *sk, int how);
332
333int tcp_v4_early_demux(struct sk_buff *skb);
334int tcp_v4_rcv(struct sk_buff *skb);
335
336void tcp_remove_empty_skb(struct sock *sk);
337int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
338int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
339int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
340 size_t size, struct ubuf_info *uarg);
341void tcp_splice_eof(struct socket *sock);
342int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
343int tcp_wmem_schedule(struct sock *sk, int copy);
344void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
345 int size_goal);
346void tcp_release_cb(struct sock *sk);
347void tcp_wfree(struct sk_buff *skb);
348void tcp_write_timer_handler(struct sock *sk);
349void tcp_delack_timer_handler(struct sock *sk);
350int tcp_ioctl(struct sock *sk, int cmd, int *karg);
351int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
352void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
353void tcp_rcv_space_adjust(struct sock *sk);
354int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
355void tcp_twsk_destructor(struct sock *sk);
356void tcp_twsk_purge(struct list_head *net_exit_list, int family);
357ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
358 struct pipe_inode_info *pipe, size_t len,
359 unsigned int flags);
360struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
361 bool force_schedule);
362
363static inline void tcp_dec_quickack_mode(struct sock *sk)
364{
365 struct inet_connection_sock *icsk = inet_csk(sk);
366
367 if (icsk->icsk_ack.quick) {
368
369 const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
370
371 if (pkts >= icsk->icsk_ack.quick) {
372 icsk->icsk_ack.quick = 0;
373
374 icsk->icsk_ack.ato = TCP_ATO_MIN;
375 } else
376 icsk->icsk_ack.quick -= pkts;
377 }
378}
379
380#define TCP_ECN_OK 1
381#define TCP_ECN_QUEUE_CWR 2
382#define TCP_ECN_DEMAND_CWR 4
383#define TCP_ECN_SEEN 8
384
385enum tcp_tw_status {
386 TCP_TW_SUCCESS = 0,
387 TCP_TW_RST = 1,
388 TCP_TW_ACK = 2,
389 TCP_TW_SYN = 3
390};
391
392
393enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
394 struct sk_buff *skb,
395 const struct tcphdr *th);
396struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
397 struct request_sock *req, bool fastopen,
398 bool *lost_race);
399int tcp_child_process(struct sock *parent, struct sock *child,
400 struct sk_buff *skb);
401void tcp_enter_loss(struct sock *sk);
402void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
403void tcp_clear_retrans(struct tcp_sock *tp);
404void tcp_update_metrics(struct sock *sk);
405void tcp_init_metrics(struct sock *sk);
406void tcp_metrics_init(void);
407bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
408void __tcp_close(struct sock *sk, long timeout);
409void tcp_close(struct sock *sk, long timeout);
410void tcp_init_sock(struct sock *sk);
411void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
412__poll_t tcp_poll(struct file *file, struct socket *sock,
413 struct poll_table_struct *wait);
414int do_tcp_getsockopt(struct sock *sk, int level,
415 int optname, sockptr_t optval, sockptr_t optlen);
416int tcp_getsockopt(struct sock *sk, int level, int optname,
417 char __user *optval, int __user *optlen);
418bool tcp_bpf_bypass_getsockopt(int level, int optname);
419int do_tcp_setsockopt(struct sock *sk, int level, int optname,
420 sockptr_t optval, unsigned int optlen);
421int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
422 unsigned int optlen);
423void tcp_set_keepalive(struct sock *sk, int val);
424void tcp_syn_ack_timeout(const struct request_sock *req);
425int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
426 int flags, int *addr_len);
427int tcp_set_rcvlowat(struct sock *sk, int val);
428int tcp_set_window_clamp(struct sock *sk, int val);
429void tcp_update_recv_tstamps(struct sk_buff *skb,
430 struct scm_timestamping_internal *tss);
431void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
432 struct scm_timestamping_internal *tss);
433void tcp_data_ready(struct sock *sk);
434#ifdef CONFIG_MMU
435int tcp_mmap(struct file *file, struct socket *sock,
436 struct vm_area_struct *vma);
437#endif
438void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
439 struct tcp_options_received *opt_rx,
440 int estab, struct tcp_fastopen_cookie *foc);
441
442
443
444
445u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
446 struct tcphdr *th, u32 *cookie);
447u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
448 struct tcphdr *th, u32 *cookie);
449u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
450u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
451 const struct tcp_request_sock_ops *af_ops,
452 struct sock *sk, struct tcphdr *th);
453
454
455
456
457void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
458void tcp_v4_mtu_reduced(struct sock *sk);
459void tcp_req_err(struct sock *sk, u32 seq, bool abort);
460void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
461int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
462struct sock *tcp_create_openreq_child(const struct sock *sk,
463 struct request_sock *req,
464 struct sk_buff *skb);
465void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
466struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
467 struct request_sock *req,
468 struct dst_entry *dst,
469 struct request_sock *req_unhash,
470 bool *own_req);
471int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
472int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
473int tcp_connect(struct sock *sk);
474enum tcp_synack_type {
475 TCP_SYNACK_NORMAL,
476 TCP_SYNACK_FASTOPEN,
477 TCP_SYNACK_COOKIE,
478};
479struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
480 struct request_sock *req,
481 struct tcp_fastopen_cookie *foc,
482 enum tcp_synack_type synack_type,
483 struct sk_buff *syn_skb);
484int tcp_disconnect(struct sock *sk, int flags);
485
486void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
487int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
488void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
489
490
491struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
492 struct request_sock *req,
493 struct dst_entry *dst, u32 tsoff);
494int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
495 u32 cookie);
496struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
497struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
498 const struct tcp_request_sock_ops *af_ops,
499 struct sock *sk, struct sk_buff *skb);
500#ifdef CONFIG_SYN_COOKIES
501
502
503
504
505
506
507
508
509#define MAX_SYNCOOKIE_AGE 2
510#define TCP_SYNCOOKIE_PERIOD (60 * HZ)
511#define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
512
513
514
515
516
517static inline void tcp_synq_overflow(const struct sock *sk)
518{
519 unsigned int last_overflow;
520 unsigned int now = jiffies;
521
522 if (sk->sk_reuseport) {
523 struct sock_reuseport *reuse;
524
525 reuse = rcu_dereference(sk->sk_reuseport_cb);
526 if (likely(reuse)) {
527 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
528 if (!time_between32(now, last_overflow,
529 last_overflow + HZ))
530 WRITE_ONCE(reuse->synq_overflow_ts, now);
531 return;
532 }
533 }
534
535 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
536 if (!time_between32(now, last_overflow, last_overflow + HZ))
537 WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
538}
539
540
541static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
542{
543 unsigned int last_overflow;
544 unsigned int now = jiffies;
545
546 if (sk->sk_reuseport) {
547 struct sock_reuseport *reuse;
548
549 reuse = rcu_dereference(sk->sk_reuseport_cb);
550 if (likely(reuse)) {
551 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
552 return !time_between32(now, last_overflow - HZ,
553 last_overflow +
554 TCP_SYNCOOKIE_VALID);
555 }
556 }
557
558 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
559
560
561
562
563
564
565
566
567 return !time_between32(now, last_overflow - HZ,
568 last_overflow + TCP_SYNCOOKIE_VALID);
569}
570
571static inline u32 tcp_cookie_time(void)
572{
573 u64 val = get_jiffies_64();
574
575 do_div(val, TCP_SYNCOOKIE_PERIOD);
576 return val;
577}
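/*
 * Illustrative arithmetic: tcp_cookie_time() advances once per
 * TCP_SYNCOOKIE_PERIOD (60 s), and a cookie is honoured for at most
 * MAX_SYNCOOKIE_AGE periods, i.e. TCP_SYNCOOKIE_VALID = 120 s worth of
 * jiffies after the last recorded listener overflow.
 */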
578
579u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
580 u16 *mssp);
581__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
582u64 cookie_init_timestamp(struct request_sock *req, u64 now);
583bool cookie_timestamp_decode(const struct net *net,
584 struct tcp_options_received *opt);
585bool cookie_ecn_ok(const struct tcp_options_received *opt,
586 const struct net *net, const struct dst_entry *dst);
587
588
589int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
590 u32 cookie);
591struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
592
593u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
594 const struct tcphdr *th, u16 *mssp);
595__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
596#endif
597
598
599void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
600void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
601void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
602 int nonagle);
603int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
604int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
605void tcp_retransmit_timer(struct sock *sk);
606void tcp_xmit_retransmit_queue(struct sock *);
607void tcp_simple_retransmit(struct sock *);
608void tcp_enter_recovery(struct sock *sk, bool ece_ack);
609int tcp_trim_head(struct sock *, struct sk_buff *, u32);
610enum tcp_queue {
611 TCP_FRAG_IN_WRITE_QUEUE,
612 TCP_FRAG_IN_RTX_QUEUE,
613};
614int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
615 struct sk_buff *skb, u32 len,
616 unsigned int mss_now, gfp_t gfp);
617
618void tcp_send_probe0(struct sock *);
619int tcp_write_wakeup(struct sock *, int mib);
620void tcp_send_fin(struct sock *sk);
621void tcp_send_active_reset(struct sock *sk, gfp_t priority);
622int tcp_send_synack(struct sock *);
623void tcp_push_one(struct sock *, unsigned int mss_now);
624void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
625void tcp_send_ack(struct sock *sk);
626void tcp_send_delayed_ack(struct sock *sk);
627void tcp_send_loss_probe(struct sock *sk);
628bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
629void tcp_skb_collapse_tstamp(struct sk_buff *skb,
630 const struct sk_buff *next_skb);
631
632
633void tcp_rearm_rto(struct sock *sk);
634void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
635void tcp_reset(struct sock *sk, struct sk_buff *skb);
636void tcp_fin(struct sock *sk);
637void tcp_check_space(struct sock *sk);
638void tcp_sack_compress_send_ack(struct sock *sk);
639
640
641void tcp_init_xmit_timers(struct sock *);
642static inline void tcp_clear_xmit_timers(struct sock *sk)
643{
644 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
645 __sock_put(sk);
646
647 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
648 __sock_put(sk);
649
650 inet_csk_clear_xmit_timers(sk);
651}
652
653unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
654unsigned int tcp_current_mss(struct sock *sk);
655u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
656
657
658static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
659{
660 int cutoff;
661
662
663
664
665
666
667
668
669 if (tp->max_window > TCP_MSS_DEFAULT)
670 cutoff = (tp->max_window >> 1);
671 else
672 cutoff = tp->max_window;
673
674 if (cutoff && pktsize > cutoff)
675 return max_t(int, cutoff, 68U - tp->tcp_header_len);
676 else
677 return pktsize;
678}
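/*
 * Worked example: if the peer has never advertised more than
 * max_window = 2920 bytes, a probe of pktsize = 2000 is cut back to half
 * the window (1460), but never below 68 - tcp_header_len, 68 being the
 * classic minimum IPv4 MTU.
 */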
679
680
681void tcp_get_info(struct sock *, struct tcp_info *);
682
683
684int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
685 sk_read_actor_t recv_actor);
686int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
687struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
688void tcp_read_done(struct sock *sk, size_t len);
689
690void tcp_initialize_rcv_mss(struct sock *sk);
691
692int tcp_mtu_to_mss(struct sock *sk, int pmtu);
693int tcp_mss_to_mtu(struct sock *sk, int mss);
694void tcp_mtup_init(struct sock *sk);
695
696static inline void tcp_bound_rto(const struct sock *sk)
697{
698 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
699 inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
700}
701
702static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
703{
704 return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
705}
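/*
 * tp->srtt_us stores the smoothed RTT scaled by 8, and tp->rttvar_us is
 * kept pre-scaled so that the Van Jacobson / RFC 6298 style
 * RTO ~= SRTT + 4*RTTVAR reduces to (srtt_us >> 3) + rttvar_us
 * microseconds here, before conversion to jiffies and clamping by
 * tcp_bound_rto().
 */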
706
707static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
708{
709
710 if (sk_is_mptcp((struct sock *)tp))
711 return;
712
713 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
714 ntohl(TCP_FLAG_ACK) |
715 snd_wnd);
716}
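/*
 * Header-prediction sketch: tp->pred_flags is compared in
 * tcp_rcv_established() against the fourth 32-bit word of an incoming TCP
 * header.  tcp_header_len << 26 places the expected data offset
 * (header_len / 4) in the doff field, so e.g. a 32-byte header with
 * timestamps yields doff = 8, and roughly speaking only a plain ACK with
 * the expected window takes the fast path.
 */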
717
718static inline void tcp_fast_path_on(struct tcp_sock *tp)
719{
720 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
721}
722
723static inline void tcp_fast_path_check(struct sock *sk)
724{
725 struct tcp_sock *tp = tcp_sk(sk);
726
727 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
728 tp->rcv_wnd &&
729 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
730 !tp->urg_data)
731 tcp_fast_path_on(tp);
732}
733
734u32 tcp_delack_max(const struct sock *sk);
735
736
737static inline u32 tcp_rto_min(const struct sock *sk)
738{
739 const struct dst_entry *dst = __sk_dst_get(sk);
740 u32 rto_min = inet_csk(sk)->icsk_rto_min;
741
742 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
743 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
744 return rto_min;
745}
746
747static inline u32 tcp_rto_min_us(const struct sock *sk)
748{
749 return jiffies_to_usecs(tcp_rto_min(sk));
750}
751
752static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
753{
754 return dst_metric_locked(dst, RTAX_CC_ALGO);
755}
756
757
758static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
759{
760 return minmax_get(&tp->rtt_min);
761}
762
763
764
765
766
767static inline u32 tcp_receive_window(const struct tcp_sock *tp)
768{
769 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
770
771 if (win < 0)
772 win = 0;
773 return (u32) win;
774}
775
776
777
778
779
780u32 __tcp_select_window(struct sock *sk);
781
782void tcp_send_window_probe(struct sock *sk);
783
784
785
786
787
788#define tcp_jiffies32 ((u32)jiffies)
789
790
791
792
793
794
795#define TCP_TS_HZ 1000
796
797static inline u64 tcp_clock_ns(void)
798{
799 return ktime_get_ns();
800}
801
802static inline u64 tcp_clock_us(void)
803{
804 return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
805}
806
807static inline u64 tcp_clock_ms(void)
808{
809 return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
810}
811
812
813
814
815
816
817static inline u32 tcp_clock_ts(bool usec_ts)
818{
819 return usec_ts ? tcp_clock_us() : tcp_clock_ms();
820}
821
822static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
823{
824 return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
825}
826
827static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
828{
829 if (tp->tcp_usec_ts)
830 return tp->tcp_mstamp;
831 return tcp_time_stamp_ms(tp);
832}
833
834void tcp_mstamp_refresh(struct tcp_sock *tp);
835
836static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
837{
838 return max_t(s64, t1 - t0, 0);
839}
840
841
842static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
843{
844 return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
845}
846
847
848static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
849{
850 if (usec_ts)
851 return tcp_skb_timestamp_us(skb);
852
853 return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
854}
855
856static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
857{
858 return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
859}
860
861static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
862{
863 return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
864}
865
866#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
867
868#define TCPHDR_FIN 0x01
869#define TCPHDR_SYN 0x02
870#define TCPHDR_RST 0x04
871#define TCPHDR_PSH 0x08
872#define TCPHDR_ACK 0x10
873#define TCPHDR_URG 0x20
874#define TCPHDR_ECE 0x40
875#define TCPHDR_CWR 0x80
876
877#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
878
879
880
881
882
883
884
885struct tcp_skb_cb {
886 __u32 seq;
887 __u32 end_seq;
888 union {
889
890
891
892
893
894
895 __u32 tcp_tw_isn;
896 struct {
897 u16 tcp_gso_segs;
898 u16 tcp_gso_size;
899 };
900 };
901 __u8 tcp_flags;
902
903 __u8 sacked;
904#define TCPCB_SACKED_ACKED 0x01
905#define TCPCB_SACKED_RETRANS 0x02
906#define TCPCB_LOST 0x04
907#define TCPCB_TAGBITS 0x07
908#define TCPCB_REPAIRED 0x10
909#define TCPCB_EVER_RETRANS 0x80
910#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
911 TCPCB_REPAIRED)
912
913 __u8 ip_dsfield;
914 __u8 txstamp_ack:1,
915 eor:1,
916 has_rxtstamp:1,
917 unused:5;
918 __u32 ack_seq;
919 union {
920 struct {
921#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
922
923 __u32 is_app_limited:1,
924 delivered_ce:20,
925 unused:11;
926
927 __u32 delivered;
928
929 u64 first_tx_mstamp;
930
931 u64 delivered_mstamp;
932 } tx;
933 union {
934 struct inet_skb_parm h4;
935#if IS_ENABLED(CONFIG_IPV6)
936 struct inet6_skb_parm h6;
937#endif
938 } header;
939 };
940};
941
942#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
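/*
 * Minimal usage sketch (the helper name is hypothetical): per-skb TCP
 * state lives in skb->cb[] and is reached through TCP_SKB_CB().  end_seq
 * counts SYN and FIN as one sequence unit each, so this is the sequence
 * space the skb occupies, not just its payload length.
 */
static inline u32 tcp_skb_seq_len_example(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}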
943
944extern const struct inet_connection_sock_af_ops ipv4_specific;
945
946#if IS_ENABLED(CONFIG_IPV6)
947
948
949
950static inline int tcp_v6_iif(const struct sk_buff *skb)
951{
952 return TCP_SKB_CB(skb)->header.h6.iif;
953}
954
955static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
956{
957 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
958
959 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
960}
961
962
963static inline int tcp_v6_sdif(const struct sk_buff *skb)
964{
965#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
966 if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
967 return TCP_SKB_CB(skb)->header.h6.iif;
968#endif
969 return 0;
970}
971
972extern const struct inet_connection_sock_af_ops ipv6_specific;
973
974INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
975INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
976void tcp_v6_early_demux(struct sk_buff *skb);
977
978#endif
979
980
981static inline int tcp_v4_sdif(struct sk_buff *skb)
982{
983#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
984 if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
985 return TCP_SKB_CB(skb)->header.h4.iif;
986#endif
987 return 0;
988}
989
990
991
992
993static inline int tcp_skb_pcount(const struct sk_buff *skb)
994{
995 return TCP_SKB_CB(skb)->tcp_gso_segs;
996}
997
998static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
999{
1000 TCP_SKB_CB(skb)->tcp_gso_segs = segs;
1001}
1002
1003static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
1004{
1005 TCP_SKB_CB(skb)->tcp_gso_segs += segs;
1006}
1007
1008
1009static inline int tcp_skb_mss(const struct sk_buff *skb)
1010{
1011 return TCP_SKB_CB(skb)->tcp_gso_size;
1012}
1013
1014static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
1015{
1016 return likely(!TCP_SKB_CB(skb)->eor);
1017}
1018
1019static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
1020 const struct sk_buff *from)
1021{
1022 return likely(tcp_skb_can_collapse_to(to) &&
1023 mptcp_skb_can_collapse(to, from) &&
1024 skb_pure_zcopy_same(to, from));
1025}
1026
1027
1028enum tcp_ca_event {
1029 CA_EVENT_TX_START,
1030 CA_EVENT_CWND_RESTART,
1031 CA_EVENT_COMPLETE_CWR,
1032 CA_EVENT_LOSS,
1033 CA_EVENT_ECN_NO_CE,
1034 CA_EVENT_ECN_IS_CE,
1035};
1036
1037
1038enum tcp_ca_ack_event_flags {
1039 CA_ACK_SLOWPATH = (1 << 0),
1040 CA_ACK_WIN_UPDATE = (1 << 1),
1041 CA_ACK_ECE = (1 << 2),
1042};
1043
1044
1045
1046
1047#define TCP_CA_NAME_MAX 16
1048#define TCP_CA_MAX 128
1049#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
1050
1051#define TCP_CA_UNSPEC 0
1052
1053
1054#define TCP_CONG_NON_RESTRICTED 0x1
1055
1056#define TCP_CONG_NEEDS_ECN 0x2
1057#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1058
1059union tcp_cc_info;
1060
1061struct ack_sample {
1062 u32 pkts_acked;
1063 s32 rtt_us;
1064 u32 in_flight;
1065};
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075struct rate_sample {
1076 u64 prior_mstamp;
1077 u32 prior_delivered;
1078 u32 prior_delivered_ce;
1079 s32 delivered;
1080 s32 delivered_ce;
1081 long interval_us;
1082 u32 snd_interval_us;
1083 u32 rcv_interval_us;
1084 long rtt_us;
1085 int losses;
1086 u32 acked_sacked;
1087 u32 prior_in_flight;
1088 u32 last_end_seq;
1089 bool is_app_limited;
1090 bool is_retrans;
1091 bool is_ack_delayed;
1092};
1093
1094struct tcp_congestion_ops {
1095
1096
1097
1098 u32 (*ssthresh)(struct sock *sk);
1099
1100
1101 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1102
1103
1104 void (*set_state)(struct sock *sk, u8 new_state);
1105
1106
1107 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1108
1109
1110 void (*in_ack_event)(struct sock *sk, u32 flags);
1111
1112
1113 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1114
1115
1116 u32 (*min_tso_segs)(struct sock *sk);
1117
1118
1119
1120
1121 void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1122
1123
1124
1125 u32 (*undo_cwnd)(struct sock *sk);
1126
1127 u32 (*sndbuf_expand)(struct sock *sk);
1128
1129
1130
1131 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1132 union tcp_cc_info *info);
1133
1134 char name[TCP_CA_NAME_MAX];
1135 struct module *owner;
1136 struct list_head list;
1137 u32 key;
1138 u32 flags;
1139
1140
1141 void (*init)(struct sock *sk);
1142
1143 void (*release)(struct sock *sk);
1144} ____cacheline_aligned_in_smp;
1145
1146int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1147void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1148int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1149 struct tcp_congestion_ops *old_type);
1150int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
1151
1152void tcp_assign_congestion_control(struct sock *sk);
1153void tcp_init_congestion_control(struct sock *sk);
1154void tcp_cleanup_congestion_control(struct sock *sk);
1155int tcp_set_default_congestion_control(struct net *net, const char *name);
1156void tcp_get_default_congestion_control(struct net *net, char *name);
1157void tcp_get_available_congestion_control(char *buf, size_t len);
1158void tcp_get_allowed_congestion_control(char *buf, size_t len);
1159int tcp_set_allowed_congestion_control(char *allowed);
1160int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1161 bool cap_net_admin);
1162u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1163void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1164
1165u32 tcp_reno_ssthresh(struct sock *sk);
1166u32 tcp_reno_undo_cwnd(struct sock *sk);
1167void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1168extern struct tcp_congestion_ops tcp_reno;
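/*
 * Minimal congestion-control sketch (module and variable names are
 * hypothetical): tcp_validate_congestion_control() requires ssthresh,
 * undo_cwnd and one of cong_avoid/cong_control, so a Reno-equivalent
 * module built on the helpers above could look like:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 * with tcp_register_congestion_control(&tcp_example) in module init and
 * tcp_unregister_congestion_control(&tcp_example) in module exit.
 */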
1169
1170struct tcp_congestion_ops *tcp_ca_find(const char *name);
1171struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1172u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1173#ifdef CONFIG_INET
1174char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1175#else
1176static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1177{
1178 return NULL;
1179}
1180#endif
1181
1182static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1183{
1184 const struct inet_connection_sock *icsk = inet_csk(sk);
1185
1186 return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1187}
1188
1189static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1190{
1191 const struct inet_connection_sock *icsk = inet_csk(sk);
1192
1193 if (icsk->icsk_ca_ops->cwnd_event)
1194 icsk->icsk_ca_ops->cwnd_event(sk, event);
1195}
1196
1197
1198void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1199
1200
1201void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1202void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1203 struct rate_sample *rs);
1204void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1205 bool is_sack_reneg, struct rate_sample *rs);
1206void tcp_rate_check_app_limited(struct sock *sk);
1207
1208static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1209{
1210 return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1211}
1212
1213
1214
1215
1216
1217
1218
1219
1220static inline int tcp_is_sack(const struct tcp_sock *tp)
1221{
1222 return likely(tp->rx_opt.sack_ok);
1223}
1224
1225static inline bool tcp_is_reno(const struct tcp_sock *tp)
1226{
1227 return !tcp_is_sack(tp);
1228}
1229
1230static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1231{
1232 return tp->sacked_out + tp->lost_out;
1233}
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1250{
1251 return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1252}
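/*
 * Worked equation: packets "in flight" = packets_out (sent but not yet
 * fully acked) - sacked_out - lost_out + retrans_out, i.e. segments
 * believed to still be in the network, retransmissions included.
 */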
1253
1254#define TCP_INFINITE_SSTHRESH 0x7fffffff
1255
1256static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1257{
1258 return tp->snd_cwnd;
1259}
1260
1261static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1262{
1263 WARN_ON_ONCE((int)val <= 0);
1264 tp->snd_cwnd = val;
1265}
1266
1267static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1268{
1269 return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1270}
1271
1272static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1273{
1274 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1275}
1276
1277static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1278{
1279 return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1280 (1 << inet_csk(sk)->icsk_ca_state);
1281}
1282
1283
1284
1285
1286
1287static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1288{
1289 const struct tcp_sock *tp = tcp_sk(sk);
1290
1291 if (tcp_in_cwnd_reduction(sk))
1292 return tp->snd_ssthresh;
1293 else
1294 return max(tp->snd_ssthresh,
1295 ((tcp_snd_cwnd(tp) >> 1) +
1296 (tcp_snd_cwnd(tp) >> 2)));
1297}
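/*
 * Outside of CWR/Recovery this returns max(ssthresh, 3/4 * cwnd), since
 * (cwnd >> 1) + (cwnd >> 2) = cwnd/2 + cwnd/4.
 */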
1298
1299
1300#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
1301
1302void tcp_enter_cwr(struct sock *sk);
1303__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1304
1305
1306
1307
1308static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1309{
1310 return 3;
1311}
1312
1313
1314static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1315{
1316 return tp->snd_una + tp->snd_wnd;
1317}
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1333{
1334 const struct tcp_sock *tp = tcp_sk(sk);
1335
1336 if (tp->is_cwnd_limited)
1337 return true;
1338
1339
1340 if (tcp_in_slow_start(tp))
1341 return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1342
1343 return false;
1344}
1345
1346
1347
1348
1349
1350
1351
1352static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1353{
1354 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1355}
1356
1357
1358
1359
1360static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1361{
1362 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1363
1364 return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1365}
1366
1367static inline void tcp_reset_xmit_timer(struct sock *sk,
1368 const int what,
1369 unsigned long when,
1370 const unsigned long max_when)
1371{
1372 inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1373 max_when);
1374}
1375
1376
1377
1378
1379
1380
1381
1382static inline unsigned long tcp_probe0_base(const struct sock *sk)
1383{
1384 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1385}
1386
1387
1388static inline unsigned long tcp_probe0_when(const struct sock *sk,
1389 unsigned long max_when)
1390{
1391 u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1392 inet_csk(sk)->icsk_backoff);
1393 u64 when = (u64)tcp_probe0_base(sk) << backoff;
1394
1395 return (unsigned long)min_t(u64, when, max_when);
1396}
1397
1398static inline void tcp_check_probe_timer(struct sock *sk)
1399{
1400 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1401 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1402 tcp_probe0_base(sk), TCP_RTO_MAX);
1403}
1404
1405static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1406{
1407 tp->snd_wl1 = seq;
1408}
1409
1410static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1411{
1412 tp->snd_wl1 = seq;
1413}
1414
1415
1416
1417
1418static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1419 __be32 daddr, __wsum base)
1420{
1421 return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1422}
1423
1424static inline bool tcp_checksum_complete(struct sk_buff *skb)
1425{
1426 return !skb_csum_unnecessary(skb) &&
1427 __skb_checksum_complete(skb);
1428}
1429
1430bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1431 enum skb_drop_reason *reason);
1432
1433
1434int tcp_filter(struct sock *sk, struct sk_buff *skb);
1435void tcp_set_state(struct sock *sk, int state);
1436void tcp_done(struct sock *sk);
1437int tcp_abort(struct sock *sk, int err);
1438
1439static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1440{
1441 rx_opt->dsack = 0;
1442 rx_opt->num_sacks = 0;
1443}
1444
1445void tcp_cwnd_restart(struct sock *sk, s32 delta);
1446
1447static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1448{
1449 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1450 struct tcp_sock *tp = tcp_sk(sk);
1451 s32 delta;
1452
1453 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1454 tp->packets_out || ca_ops->cong_control)
1455 return;
1456 delta = tcp_jiffies32 - tp->lsndtime;
1457 if (delta > inet_csk(sk)->icsk_rto)
1458 tcp_cwnd_restart(sk, delta);
1459}
1460
1461
1462void tcp_select_initial_window(const struct sock *sk, int __space,
1463 __u32 mss, __u32 *rcv_wnd,
1464 __u32 *window_clamp, int wscale_ok,
1465 __u8 *rcv_wscale, __u32 init_rcv_wnd);
1466
1467static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1468{
1469 s64 scaled_space = (s64)space * scaling_ratio;
1470
1471 return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1472}
1473
1474static inline int tcp_win_from_space(const struct sock *sk, int space)
1475{
1476 return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1477}
1478
1479
1480static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1481{
1482 u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1483
1484 do_div(val, scaling_ratio);
1485 return val;
1486}
1487
1488static inline int tcp_space_from_win(const struct sock *sk, int win)
1489{
1490 return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1491}
1492
1493
1494
1495
1496#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
1497 SKB_TRUESIZE(4096))
1498
1499static inline void tcp_scaling_ratio_init(struct sock *sk)
1500{
1501 tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1502}
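/*
 * Illustrative arithmetic (the exact value depends on skb layout): the
 * scaling ratio is expressed in units of 1/2^TCP_RMEM_TO_WIN_SCALE, and
 * the default of 1200 usable bytes per SKB_TRUESIZE(4096) works out to
 * roughly 0.25, i.e. about a quarter of sk_rcvbuf is initially counted as
 * receive window until the real ratio is measured.
 */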
1503
1504
1505static inline int tcp_space(const struct sock *sk)
1506{
1507 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1508 READ_ONCE(sk->sk_backlog.len) -
1509 atomic_read(&sk->sk_rmem_alloc));
1510}
1511
1512static inline int tcp_full_space(const struct sock *sk)
1513{
1514 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1515}
1516
1517static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1518{
1519 int unused_mem = sk_unused_reserved_mem(sk);
1520 struct tcp_sock *tp = tcp_sk(sk);
1521
1522 tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1523 if (unused_mem)
1524 tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1525 tcp_win_from_space(sk, unused_mem));
1526}
1527
1528static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1529{
1530 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1531}
1532
1533void tcp_cleanup_rbuf(struct sock *sk, int copied);
1534void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1535
1536
1537
1538
1539
1540
1541
1542static inline bool tcp_rmem_pressure(const struct sock *sk)
1543{
1544 int rcvbuf, threshold;
1545
1546 if (tcp_under_memory_pressure(sk))
1547 return true;
1548
1549 rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1550 threshold = rcvbuf - (rcvbuf >> 3);
1551
1552 return atomic_read(&sk->sk_rmem_alloc) > threshold;
1553}
1554
1555static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1556{
1557 const struct tcp_sock *tp = tcp_sk(sk);
1558 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1559
1560 if (avail <= 0)
1561 return false;
1562
1563 return (avail >= target) || tcp_rmem_pressure(sk) ||
1564 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1565}
1566
1567extern void tcp_openreq_init_rwin(struct request_sock *req,
1568 const struct sock *sk_listener,
1569 const struct dst_entry *dst);
1570
1571void tcp_enter_memory_pressure(struct sock *sk);
1572void tcp_leave_memory_pressure(struct sock *sk);
1573
1574static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1575{
1576 struct net *net = sock_net((struct sock *)tp);
1577 int val;
1578
1579
1580
1581
1582 val = READ_ONCE(tp->keepalive_intvl);
1583
1584 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1585}
1586
1587static inline int keepalive_time_when(const struct tcp_sock *tp)
1588{
1589 struct net *net = sock_net((struct sock *)tp);
1590 int val;
1591
1592
1593 val = READ_ONCE(tp->keepalive_time);
1594
1595 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1596}
1597
1598static inline int keepalive_probes(const struct tcp_sock *tp)
1599{
1600 struct net *net = sock_net((struct sock *)tp);
1601 int val;
1602
1603
1604
1605
1606 val = READ_ONCE(tp->keepalive_probes);
1607
1608 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1609}
1610
1611static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1612{
1613 const struct inet_connection_sock *icsk = &tp->inet_conn;
1614
1615 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1616 tcp_jiffies32 - tp->rcv_tstamp);
1617}
1618
1619static inline int tcp_fin_time(const struct sock *sk)
1620{
1621 int fin_timeout = tcp_sk(sk)->linger2 ? :
1622 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1623 const int rto = inet_csk(sk)->icsk_rto;
1624
1625 if (fin_timeout < (rto << 2) - (rto >> 1))
1626 fin_timeout = (rto << 2) - (rto >> 1);
1627
1628 return fin_timeout;
1629}
1630
1631static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1632 int paws_win)
1633{
1634 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1635 return true;
1636 if (unlikely(!time_before32(ktime_get_seconds(),
1637 rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1638 return true;
1639
1640
1641
1642
1643
1644 if (!rx_opt->ts_recent)
1645 return true;
1646 return false;
1647}
1648
1649static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1650 int rst)
1651{
1652 if (tcp_paws_check(rx_opt, 0))
1653 return false;
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667 if (rst && !time_before32(ktime_get_seconds(),
1668 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1669 return false;
1670 return true;
1671}
1672
1673bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1674 int mib_idx, u32 *last_oow_ack_time);
1675
1676static inline void tcp_mib_init(struct net *net)
1677{
1678
1679 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1680 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1681 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1682 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1683}
1684
1685
1686static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1687{
1688 tp->lost_skb_hint = NULL;
1689}
1690
1691static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1692{
1693 tcp_clear_retrans_hints_partial(tp);
1694 tp->retransmit_skb_hint = NULL;
1695}
1696
1697#define tcp_md5_addr tcp_ao_addr
1698
1699
1700struct tcp_md5sig_key {
1701 struct hlist_node node;
1702 u8 keylen;
1703 u8 family;
1704 u8 prefixlen;
1705 u8 flags;
1706 union tcp_md5_addr addr;
1707 int l3index;
1708 u8 key[TCP_MD5SIG_MAXKEYLEN];
1709 struct rcu_head rcu;
1710};
1711
1712
1713struct tcp_md5sig_info {
1714 struct hlist_head head;
1715 struct rcu_head rcu;
1716};
1717
1718
1719struct tcp4_pseudohdr {
1720 __be32 saddr;
1721 __be32 daddr;
1722 __u8 pad;
1723 __u8 protocol;
1724 __be16 len;
1725};
1726
1727struct tcp6_pseudohdr {
1728 struct in6_addr saddr;
1729 struct in6_addr daddr;
1730 __be32 len;
1731 __be32 protocol;
1732};
1733
1734union tcp_md5sum_block {
1735 struct tcp4_pseudohdr ip4;
1736#if IS_ENABLED(CONFIG_IPV6)
1737 struct tcp6_pseudohdr ip6;
1738#endif
1739};
1740
1741
1742
1743
1744
1745
1746
1747
1748struct tcp_sigpool {
1749 void *scratch;
1750 struct ahash_request *req;
1751};
1752
1753int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
1754void tcp_sigpool_get(unsigned int id);
1755void tcp_sigpool_release(unsigned int id);
1756int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
1757 const struct sk_buff *skb,
1758 unsigned int header_len);
1759
1760
1761
1762
1763
1764
1765
1766
1767int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
1768
1769
1770
1771
1772void tcp_sigpool_end(struct tcp_sigpool *c);
1773size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
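/*
 * Typical usage sketch (error handling elided, "id" stands for a pool id
 * previously returned by tcp_sigpool_alloc_ahash()):
 *
 *	struct tcp_sigpool hp;
 *
 *	if (tcp_sigpool_start(id, &hp))
 *		return;
 *	... feed data, e.g. tcp_sigpool_hash_skb_data(&hp, skb, header_len)
 *	    and/or tcp_md5_hash_key(&hp, key) ...
 *	tcp_sigpool_end(&hp);
 *
 * tcp_sigpool_start() and tcp_sigpool_end() must always be paired in the
 * same context.
 */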
1774
1775int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1776 const struct sock *sk, const struct sk_buff *skb);
1777int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1778 int family, u8 prefixlen, int l3index, u8 flags,
1779 const u8 *newkey, u8 newkeylen);
1780int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1781 int family, u8 prefixlen, int l3index,
1782 struct tcp_md5sig_key *key);
1783
1784int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1785 int family, u8 prefixlen, int l3index, u8 flags);
1786void tcp_clear_md5_list(struct sock *sk);
1787struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1788 const struct sock *addr_sk);
1789
1790#ifdef CONFIG_TCP_MD5SIG
1791struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1792 const union tcp_md5_addr *addr,
1793 int family, bool any_l3index);
1794static inline struct tcp_md5sig_key *
1795tcp_md5_do_lookup(const struct sock *sk, int l3index,
1796 const union tcp_md5_addr *addr, int family)
1797{
1798 if (!static_branch_unlikely(&tcp_md5_needed.key))
1799 return NULL;
1800 return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
1801}
1802
1803static inline struct tcp_md5sig_key *
1804tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1805 const union tcp_md5_addr *addr, int family)
1806{
1807 if (!static_branch_unlikely(&tcp_md5_needed.key))
1808 return NULL;
1809 return __tcp_md5_do_lookup(sk, 0, addr, family, true);
1810}
1811
1812enum skb_drop_reason
1813tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1814 const void *saddr, const void *daddr,
1815 int family, int l3index, const __u8 *hash_location);
1816
1817
1818#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
1819#else
1820static inline struct tcp_md5sig_key *
1821tcp_md5_do_lookup(const struct sock *sk, int l3index,
1822 const union tcp_md5_addr *addr, int family)
1823{
1824 return NULL;
1825}
1826
1827static inline struct tcp_md5sig_key *
1828tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1829 const union tcp_md5_addr *addr, int family)
1830{
1831 return NULL;
1832}
1833
1834static inline enum skb_drop_reason
1835tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
1836 const void *saddr, const void *daddr,
1837 int family, int l3index, const __u8 *hash_location)
1838{
1839 return SKB_NOT_DROPPED_YET;
1840}
1841#define tcp_twsk_md5_key(twsk) NULL
1842#endif
1843
1844int tcp_md5_alloc_sigpool(void);
1845void tcp_md5_release_sigpool(void);
1846void tcp_md5_add_sigpool(void);
1847extern int tcp_md5_sigpool_id;
1848
1849int tcp_md5_hash_key(struct tcp_sigpool *hp,
1850 const struct tcp_md5sig_key *key);
1851
1852
1853void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1854 struct tcp_fastopen_cookie *cookie);
1855void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1856 struct tcp_fastopen_cookie *cookie, bool syn_lost,
1857 u16 try_exp);
1858struct tcp_fastopen_request {
1859
1860 struct tcp_fastopen_cookie cookie;
1861 struct msghdr *data;
1862 size_t size;
1863 int copied;
1864 struct ubuf_info *uarg;
1865};
1866void tcp_free_fastopen_req(struct tcp_sock *tp);
1867void tcp_fastopen_destroy_cipher(struct sock *sk);
1868void tcp_fastopen_ctx_destroy(struct net *net);
1869int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1870 void *primary_key, void *backup_key);
1871int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1872 u64 *key);
1873void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1874struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1875 struct request_sock *req,
1876 struct tcp_fastopen_cookie *foc,
1877 const struct dst_entry *dst);
1878void tcp_fastopen_init_key_once(struct net *net);
1879bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1880 struct tcp_fastopen_cookie *cookie);
1881bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1882#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1883#define TCP_FASTOPEN_KEY_MAX 2
1884#define TCP_FASTOPEN_KEY_BUF_LENGTH \
1885 (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1886
1887
1888struct tcp_fastopen_context {
1889 siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
1890 int num;
1891 struct rcu_head rcu;
1892};
1893
1894void tcp_fastopen_active_disable(struct sock *sk);
1895bool tcp_fastopen_active_should_disable(struct sock *sk);
1896void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1897void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1898
1899
1900static inline
1901struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1902{
1903 struct tcp_fastopen_context *ctx;
1904
1905 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1906 if (!ctx)
1907 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1908 return ctx;
1909}
1910
1911static inline
1912bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1913 const struct tcp_fastopen_cookie *orig)
1914{
1915 if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1916 orig->len == foc->len &&
1917 !memcmp(orig->val, foc->val, foc->len))
1918 return true;
1919 return false;
1920}
1921
1922static inline
1923int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1924{
1925 return ctx->num;
1926}
1927
1928
1929
1930
1931enum tcp_chrono {
1932 TCP_CHRONO_UNSPEC,
1933 TCP_CHRONO_BUSY,
1934 TCP_CHRONO_RWND_LIMITED,
1935 TCP_CHRONO_SNDBUF_LIMITED,
1936 __TCP_CHRONO_MAX,
1937};
1938
1939void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1940void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1941
1942
1943
1944
1945static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1946{
1947 skb->destructor = NULL;
1948 skb->_skb_refdst = 0UL;
1949}
1950
1951#define tcp_skb_tsorted_save(skb) { \
1952 unsigned long _save = skb->_skb_refdst; \
1953 skb->_skb_refdst = 0UL;
1954
1955#define tcp_skb_tsorted_restore(skb) \
1956 skb->_skb_refdst = _save; \
1957}
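/*
 * These two macros deliberately open and close a brace pair across the
 * call site, so they must always be used together:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		... code that may clobber skb->_skb_refdst, e.g. cloning
 *		    or transmitting the skb ...
 *	} tcp_skb_tsorted_restore(skb);
 *
 * skb->_skb_refdst shares storage with the tcp_tsorted_anchor list head,
 * which is why it is saved and zeroed around such calls.
 */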
1958
1959void tcp_write_queue_purge(struct sock *sk);
1960
1961static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1962{
1963 return skb_rb_first(&sk->tcp_rtx_queue);
1964}
1965
1966static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1967{
1968 return skb_rb_last(&sk->tcp_rtx_queue);
1969}
1970
1971static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1972{
1973 return skb_peek_tail(&sk->sk_write_queue);
1974}
1975
1976#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
1977 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1978
1979static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1980{
1981 return skb_peek(&sk->sk_write_queue);
1982}
1983
1984static inline bool tcp_skb_is_last(const struct sock *sk,
1985 const struct sk_buff *skb)
1986{
1987 return skb_queue_is_last(&sk->sk_write_queue, skb);
1988}
1989
1990
1991
1992
1993
1994
1995
1996
1997static inline bool tcp_write_queue_empty(const struct sock *sk)
1998{
1999 const struct tcp_sock *tp = tcp_sk(sk);
2000
2001 return tp->write_seq == tp->snd_nxt;
2002}
2003
2004static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2005{
2006 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2007}
2008
2009static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2010{
2011 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2012}
2013
2014static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2015{
2016 __skb_queue_tail(&sk->sk_write_queue, skb);
2017
2018
2019 if (sk->sk_write_queue.next == skb)
2020 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2021}
2022
2023
2024static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2025 struct sk_buff *skb,
2026 struct sock *sk)
2027{
2028 __skb_queue_before(&sk->sk_write_queue, skb, new);
2029}
2030
2031static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2032{
2033 tcp_skb_tsorted_anchor_cleanup(skb);
2034 __skb_unlink(skb, &sk->sk_write_queue);
2035}
2036
2037void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2038
2039static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2040{
2041 tcp_skb_tsorted_anchor_cleanup(skb);
2042 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2043}
2044
2045static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2046{
2047 list_del(&skb->tcp_tsorted_anchor);
2048 tcp_rtx_queue_unlink(skb, sk);
2049 tcp_wmem_free_skb(sk, skb);
2050}
2051
2052static inline void tcp_push_pending_frames(struct sock *sk)
2053{
2054 if (tcp_send_head(sk)) {
2055 struct tcp_sock *tp = tcp_sk(sk);
2056
2057 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2058 }
2059}
2060
2061
2062
2063
2064
2065static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2066{
2067 if (!tp->sacked_out)
2068 return tp->snd_una;
2069
2070 if (tp->highest_sack == NULL)
2071 return tp->snd_nxt;
2072
2073 return TCP_SKB_CB(tp->highest_sack)->seq;
2074}
2075
2076static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2077{
2078 tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2079}
2080
2081static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2082{
2083 return tcp_sk(sk)->highest_sack;
2084}
2085
2086static inline void tcp_highest_sack_reset(struct sock *sk)
2087{
2088 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2089}
2090
2091
2092static inline void tcp_highest_sack_replace(struct sock *sk,
2093 struct sk_buff *old,
2094 struct sk_buff *new)
2095{
2096 if (old == tcp_highest_sack(sk))
2097 tcp_sk(sk)->highest_sack = new;
2098}
2099
2100
2101static inline bool inet_sk_transparent(const struct sock *sk)
2102{
2103 switch (sk->sk_state) {
2104 case TCP_TIME_WAIT:
2105 return inet_twsk(sk)->tw_transparent;
2106 case TCP_NEW_SYN_RECV:
2107 return inet_rsk(inet_reqsk(sk))->no_srccheck;
2108 }
2109 return inet_test_bit(TRANSPARENT, sk);
2110}
2111
2112
2113
2114
2115static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2116{
2117 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2118}
2119
2120
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
#ifdef CONFIG_INET
void tcp_gro_complete(struct sk_buff *skb);
#else
static inline void tcp_gro_complete(struct sk_buff *skb) { }
#endif

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	u32 val;

	val = READ_ONCE(tp->notsent_lowat);

	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

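/* TCP af-specific (IPv4/IPv6) socket operations, mostly the MD5 and
 * TCP-AO signing hooks.
 */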
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup)(const struct sock *sk,
					       const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
	int		(*md5_parse)(struct sock *sk,
				     int optname,
				     sockptr_t optval,
				     int optlen);
#endif
#ifdef CONFIG_TCP_AO
	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct sock *addr_sk,
					int sndid, int rcvid);
	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
			      const struct sock *sk,
			      __be32 sisn, __be32 disn, bool send);
	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
			    const struct sock *sk, const struct sk_buff *skb,
			    const u8 *tkey, int hash_offset, u32 sne);
#endif
};

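/* Per-address-family hooks used while processing an incoming connection
 * request (SYN): signing, syncookie sequence generation, route lookup,
 * ISN/timestamp-offset generation and SYN-ACK transmit.
 */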
struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	int		(*calc_md5_hash)(char *location,
					 const struct tcp_md5sig_key *md5,
					 const struct sock *sk,
					 const struct sk_buff *skb);
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
			      struct request_sock *req, const struct sk_buff *skb,
			      int hash_offset, u32 sne);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req);
	u32 (*init_seq)(const struct sk_buff *skb);
	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

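/* When syncookies are enabled, note the listen queue overflow, bump the
 * SNMP counter and let the af-specific hook encode the cookie into the
 * initial sequence number; without CONFIG_SYN_COOKIES this is a no-op.
 */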
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

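/* Signing key selected for a given segment: either a TCP-AO key (with its
 * traffic key, sequence number extension and rcv_next key id) or an MD5
 * key, or none.
 */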
struct tcp_key {
	union {
		struct {
			struct tcp_ao_key *ao_key;
			char *traffic_key;
			u32 sne;
			u8 rcv_next;
		};
		struct tcp_md5sig_key *md5_key;
	};
	enum {
		TCP_KEY_NONE = 0,
		TCP_KEY_MD5,
		TCP_KEY_AO,
	} type;
};

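/* Pick the key to sign outgoing segments with: prefer the current TCP-AO
 * key when AO is in use on this socket, then fall back to an MD5 key,
 * otherwise report TCP_KEY_NONE.
 */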
static inline void tcp_get_current_key(const struct sock *sk,
				       struct tcp_key *out)
{
#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
	const struct tcp_sock *tp = tcp_sk(sk);
#endif

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key)) {
		struct tcp_ao_info *ao;

		ao = rcu_dereference_protected(tp->ao_info,
					       lockdep_sock_is_held(sk));
		if (ao) {
			out->ao_key = READ_ONCE(ao->current_key);
			out->type = TCP_KEY_AO;
			return;
		}
	}
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
		if (out->md5_key) {
			out->type = TCP_KEY_MD5;
			return;
		}
	}
#endif
	out->type = TCP_KEY_NONE;
}

static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    key->type == TCP_KEY_MD5)
		return true;
#endif
	return false;
}

static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key) &&
	    key->type == TCP_KEY_AO)
		return true;
#endif
	return false;
}

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

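/* tcp_recovery.c: loss-marking helpers, including RACK (Recent
 * ACKnowledgment) based loss detection and its reordering-window timer.
 */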
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
			     u64 xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);

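/* tcp_plb.c: Protective Load Balancing. Fractions passed to
 * tcp_plb_update_state() (e.g. the per-round congestion ratio) are
 * expressed in fixed point, scaled by (1 << TCP_PLB_SCALE).
 */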
#define TCP_PLB_SCALE 8

/* State for PLB (Protective Load Balancing) */
struct tcp_plb_state {
	u8	consec_cong_rounds:5,	/* consecutive congested rounds */
		unused:3;
	u32	pause_until;		/* jiffies32 until which rehashing is paused */
};

static inline void tcp_plb_init(const struct sock *sk,
				struct tcp_plb_state *plb)
{
	plb->consec_cong_rounds = 0;
	plb->pause_until = 0;
}
void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
			  const int cong_ratio);
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);

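/* How many usecs from now does the RTO of the head of the retransmit
 * queue expire? Negative values mean it has already expired.
 */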
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;
	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
}

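/* Save and compile IPv4 options into a newly allocated ip_options_rcu,
 * or return NULL if there are none (or the echo/allocation fails).
 */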
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}

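/* Locally generated TCP pure ACKs have skb->truesize == 2, so drivers and
 * core code can detect them without dissecting the packet.
 */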
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

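/* Amount of received but not yet read data on the socket: zero before the
 * handshake completes, and out-of-band data is skipped unless
 * SOCK_URGINLINE is set.
 */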
static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	/* These counters may be read locklessly by other threads
	 * (e.g. via tcp_get_info()), hence the WRITE_ONCE().
	 */
	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
	if (skb->len > tcp_hdrlen(skb))
		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}

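/* Both the regular listen path and the SYN-flood path may call this on a
 * const listener socket; drops are accounted with an atomic increment of
 * sk_drops plus the LINUX_MIB_LISTENDROPS SNMP counter.
 */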
static inline void tcp_listendrop(const struct sock *sk)
{
	atomic_inc(&((struct sock *)sk)->sk_drops);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

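/* Interface for adding Upper Level Protocols (ULPs) over TCP, e.g. kTLS. */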
#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
	size_t (*get_info_size)(const struct sock *sk);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)				\
	__MODULE_INFO(alias, alias_userspace, name);		\
	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)

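/* A ULP module registers a struct tcp_ulp_ops and advertises a
 * "tcp-ulp-<name>" alias so the core can load it on demand. A minimal
 * sketch (hypothetical "demo" ULP, not part of this header):
 *
 *	static struct tcp_ulp_ops demo_ulp_ops __read_mostly = {
 *		.name		= "demo",
 *		.owner		= THIS_MODULE,
 *		.init		= demo_ulp_init,
 *		.release	= demo_ulp_release,
 *	};
 *	MODULE_ALIAS_TCP_ULP("demo");
 *	// from module_init(): tcp_register_ulp(&demo_ulp_ops);
 *
 * Userspace then selects it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "demo", sizeof("demo")).
 */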

#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif

#ifdef CONFIG_INET
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
#endif

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif

#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

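/* Run the cgroup BPF_SOCK_OPS program for @op with optional @args.
 * Returns the program's reply on success, or -1 if running the program
 * failed. The stubs below return -EPERM when CONFIG_BPF is not set.
 */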
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif

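/* Let a BPF_SOCK_OPS program pick the initial RTO (BPF_SOCK_OPS_TIMEOUT_INIT)
 * and the initial receive window (BPF_SOCK_OPS_RWND_INIT); fall back to
 * TCP_TIMEOUT_INIT (clamped to TCP_RTO_MAX), respectively to 0, when the
 * program declines or returns a negative value.
 */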
static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return min_t(int, timeout, TCP_RTO_MAX);
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}

static inline void tcp_bpf_rtt(struct sock *sk)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
void clean_acked_data_flush(void);
#endif

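/* When the tcp_tx_delay static key is enabled, delay the skb transmit
 * timestamp by the per-socket tcp_tx_delay (usecs), converted to ns.
 */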
DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

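/* Compute the timestamp (in ns) at which a packet should be sent,
 * honouring the socket's (or timewait socket's) configured tx delay;
 * returns 0 when the tcp_tx_delay static key is disabled.
 */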
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

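/* Parse the TCP-MD5 and TCP-AO options from @th. On success, *md5_hash
 * points at the MD5 hash (or NULL) and *aoh at the AO option header (or
 * NULL); a non-zero error is returned when the options are malformed.
 */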
static inline int tcp_parse_auth_options(const struct tcphdr *th,
	      const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
{
	const u8 *md5_tmp, *ao_tmp;
	int ret;

	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
	if (ret)
		return ret;

	if (md5_hash)
		*md5_hash = md5_tmp;

	if (aoh) {
		if (!ao_tmp)
			*aoh = NULL;
		else
			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
	}

	return 0;
}

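/* True when this socket requires TCP-AO signatures from the given peer
 * (either ao_required is set or a matching AO key exists); optionally
 * bumps the TCPAORequired counters. Always false without CONFIG_TCP_AO.
 */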
static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
				   int family, int l3index, bool stat_inc)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;
	struct tcp_ao_key *ao_key;

	if (!static_branch_unlikely(&tcp_ao_needed.key))
		return false;

	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
					lockdep_sock_is_held(sk));
	if (!ao_info)
		return false;

	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
	if (ao_info->ao_required || ao_key) {
		if (stat_inc) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
			atomic64_inc(&ao_info->counters.ao_required);
		}
		return true;
	}
#endif
	return false;
}

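/* Validate the authentication (MD5 / TCP-AO) state of an incoming segment
 * against the socket's configured keys. Returns SKB_NOT_DROPPED_YET when
 * the segment may proceed, or a drop reason describing the failure.
 */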
static inline enum skb_drop_reason
tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
		 const struct sk_buff *skb,
		 const void *saddr, const void *daddr,
		 int family, int dif, int sdif)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct tcp_ao_hdr *aoh;
	const __u8 *md5_location;
	int l3index;

	/* Drop segments carrying malformed MD5/AO options */
	if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
		tcp_hash_fail("TCP segment has incorrect auth options set",
			      family, skb, "");
		return SKB_DROP_REASON_TCP_AUTH_HDR;
	}

	if (req) {
		if (tcp_rsk_used_ao(req) != !!aoh) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
			tcp_hash_fail("TCP connection can't start/end using TCP-AO",
				      family, skb, "%s",
				      !aoh ? "missing AO" : "AO signed");
			return SKB_DROP_REASON_TCP_AOFAILURE;
		}
	}

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	/* Fast path: neither MD5 nor AO option is present */
	if (likely(!md5_location && !aoh)) {
		/* Drop if this socket nevertheless expects signing from the
		 * peer: either TCP-AO is mandated for it, or an MD5 key is
		 * configured for it.
		 */
		if (tcp_ao_required(sk, saddr, family, l3index, true)) {
			tcp_hash_fail("AO hash is required, but not found",
				      family, skb, "L3 index %d", l3index);
			return SKB_DROP_REASON_TCP_AONOTFOUND;
		}
		if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
			tcp_hash_fail("MD5 Hash not found",
				      family, skb, "L3 index %d", l3index);
			return SKB_DROP_REASON_TCP_MD5NOTFOUND;
		}
		return SKB_NOT_DROPPED_YET;
	}

	if (aoh)
		return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);

	return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
				    l3index, md5_location);
}

#endif /* _TCP_H */