/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP):
 *		output engine (building headers and transmitting segments).
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>
#include <net/mptcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>
49
/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
53void tcp_mstamp_refresh(struct tcp_sock *tp)
54{
55 u64 val = tcp_clock_ns();
56
57 tp->tcp_clock_cache = val;
58 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
59}
60
61static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
62 int push_one, gfp_t gfp);
63
/* Account for new data that has been sent to the network. */
65static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
66{
67 struct inet_connection_sock *icsk = inet_csk(sk);
68 struct tcp_sock *tp = tcp_sk(sk);
69 unsigned int prior_packets = tp->packets_out;
70
71 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
72
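	/* Once sent, the skb leaves the plain write queue and is inserted
	 * into the rtx rb-tree, keyed by sequence number, so ACK processing
	 * and retransmission can find it quickly.
	 */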
73 __skb_unlink(skb, &sk->sk_write_queue);
74 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
75
76 if (tp->highest_sack == NULL)
77 tp->highest_sack = skb;
78
79 tp->packets_out += tcp_skb_pcount(skb);
80 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
81 tcp_rearm_rto(sk);
82
83 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
84 tcp_skb_pcount(skb));
85}
86
/* Pick a sequence number for a segment that carries no new data (pure ACK,
 * window probe): use SND.NXT unless the peer has shrunk its window past it
 * by more than one window-scale unit of precision, in which case fall back
 * to the right edge of the offered window.
 */
94static inline __u32 tcp_acceptable_seq(const struct sock *sk)
95{
96 const struct tcp_sock *tp = tcp_sk(sk);
97
98 if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
99 (tp->rx_opt.wscale_ok &&
100 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
101 return tp->snd_nxt;
102 else
103 return tcp_wnd_end(tp);
104}
105
/* Calculate MSS to advertise in our SYN.
 *
 * RFC 1122 says the advertised MSS describes only the amount of data we
 * are willing to receive per segment; it accounts for neither TCP nor IP
 * options.  Start from the cached tp->advmss and lower it if the route
 * metric now advertises a smaller value, so the peer never has a reason
 * to send segments larger than we can comfortably accept.
 */
120static __u16 tcp_advertise_mss(struct sock *sk)
121{
122 struct tcp_sock *tp = tcp_sk(sk);
123 const struct dst_entry *dst = __sk_dst_get(sk);
124 int mss = tp->advmss;
125
126 if (dst) {
127 unsigned int metric = dst_metric_advmss(dst);
128
129 if (metric < mss) {
130 mss = metric;
131 tp->advmss = mss;
132 }
133 }
134
135 return (__u16)mss;
136}
137
/* RFC2861.  Reset CWND after an idle period longer than the RTO, halving
 * it roughly once per RTO elapsed ("restart window").  This is the first
 * part of the cwnd validation mechanism.
 */
141void tcp_cwnd_restart(struct sock *sk, s32 delta)
142{
143 struct tcp_sock *tp = tcp_sk(sk);
144 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
145 u32 cwnd = tp->snd_cwnd;
146
147 tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
148
149 tp->snd_ssthresh = tcp_current_ssthresh(sk);
150 restart_cwnd = min(restart_cwnd, cwnd);
151
152 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
153 cwnd >>= 1;
154 tp->snd_cwnd = max(cwnd, restart_cwnd);
155 tp->snd_cwnd_stamp = tcp_jiffies32;
156 tp->snd_cwnd_used = 0;
157}
158
/* Congestion state accounting after a packet has been sent. */
160static void tcp_event_data_sent(struct tcp_sock *tp,
161 struct sock *sk)
162{
163 struct inet_connection_sock *icsk = inet_csk(sk);
164 const u32 now = tcp_jiffies32;
165
166 if (tcp_packets_in_flight(tp) == 0)
167 tcp_ca_event(sk, CA_EVENT_TX_START);
168
	/* If this is the first data packet sent after the last received
	 * packet, and it goes out within the delayed-ACK timeout (ato) of
	 * that packet, count it towards interactive (pingpong) mode.
	 */
174 if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
175 (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
176 inet_csk_inc_pingpong_cnt(sk);
177
178 tp->lsndtime = now;
179}
180
/* Account for an ACK we sent. */
182static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
183 u32 rcv_nxt)
184{
185 struct tcp_sock *tp = tcp_sk(sk);
186
187 if (unlikely(tp->compressed_ack)) {
188 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
189 tp->compressed_ack);
190 tp->compressed_ack = 0;
191 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
192 __sock_put(sk);
193 }
194
195 if (unlikely(rcv_nxt != tp->rcv_nxt))
196 return;
197 tcp_dec_quickack_mode(sk, pkts);
198 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
199}
200
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered.  Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible.  We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
208void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
209 __u32 *rcv_wnd, __u32 *window_clamp,
210 int wscale_ok, __u8 *rcv_wscale,
211 __u32 init_rcv_wnd)
212{
213 unsigned int space = (__space < 0 ? 0 : __space);
214
215
216 if (*window_clamp == 0)
217 (*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
218 space = min(*window_clamp, space);
219
220
221 if (space > mss)
222 space = rounddown(space, mss);
223
	/* NOTE: offering an initial window larger than 32767 will break
	 * some buggy TCP stacks that interpret the window field as a
	 * signed quantity.  If the admin tells us it is likely we could be
	 * speaking with such a stack, truncate the initial window offer to
	 * 32K-1; otherwise allow the full 16-bit value.  Scaled windows are
	 * only used once the handshake has negotiated them.
	 */
232 if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
233 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
234 else
235 (*rcv_wnd) = min_t(u32, space, U16_MAX);
236
237 if (init_rcv_wnd)
238 *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
239
240 *rcv_wscale = 0;
241 if (wscale_ok) {
242
243 space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
244 space = max_t(u32, space, sysctl_rmem_max);
245 space = min_t(u32, space, *window_clamp);
246 *rcv_wscale = clamp_t(int, ilog2(space) - 15,
247 0, TCP_MAX_WSCALE);
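		/* Worked example: with a 6 MB receive buffer, ilog2(space)
		 * is 22, so rcv_wscale becomes clamp(22 - 15, 0,
		 * TCP_MAX_WSCALE) = 7, and a scaled window of 65535 << 7
		 * (about 8 MB) covers the whole buffer.
		 */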
248 }
249
250 (*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
251}
252EXPORT_SYMBOL(tcp_select_initial_window);
253
/* Chose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
259static u16 tcp_select_window(struct sock *sk)
260{
261 struct tcp_sock *tp = tcp_sk(sk);
262 u32 old_win = tp->rcv_wnd;
263 u32 cur_win = tcp_receive_window(tp);
264 u32 new_win = __tcp_select_window(sk);
265
	/* Never shrink the offered window. */
267 if (new_win < cur_win) {
		/* Danger zone: the freshly computed window is smaller than
		 * the one we already advertised.  Shrinking the offer could
		 * break peers with data in flight, so keep announcing the
		 * current right edge, rounded up to a whole scale unit, and
		 * only account for the zero window we wanted to send.
		 */
275 if (new_win == 0)
276 NET_INC_STATS(sock_net(sk),
277 LINUX_MIB_TCPWANTZEROWINDOWADV);
278 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
279 }
280 tp->rcv_wnd = new_win;
281 tp->rcv_wup = tp->rcv_nxt;
282
	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
286 if (!tp->rx_opt.rcv_wscale &&
287 sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
288 new_win = min(new_win, MAX_TCP_WINDOW);
289 else
290 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
291
292
293 new_win >>= tp->rx_opt.rcv_wscale;
294
295
296 if (new_win == 0) {
297 tp->pred_flags = 0;
298 if (old_win)
299 NET_INC_STATS(sock_net(sk),
300 LINUX_MIB_TCPTOZEROWINDOWADV);
301 } else if (old_win == 0) {
302 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
303 }
304
305 return new_win;
306}
307
308
309static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
310{
311 const struct tcp_sock *tp = tcp_sk(sk);
312
313 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
314 if (!(tp->ecn_flags & TCP_ECN_OK))
315 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
316 else if (tcp_ca_needs_ecn(sk) ||
317 tcp_bpf_ca_needs_ecn(sk))
318 INET_ECN_xmit(sk);
319}
320
321
322static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
323{
324 struct tcp_sock *tp = tcp_sk(sk);
325 bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
326 bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
327 tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
328
329 if (!use_ecn) {
330 const struct dst_entry *dst = __sk_dst_get(sk);
331
332 if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
333 use_ecn = true;
334 }
335
336 tp->ecn_flags = 0;
337
338 if (use_ecn) {
339 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
340 tp->ecn_flags = TCP_ECN_OK;
341 if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
342 INET_ECN_xmit(sk);
343 }
344}
345
346static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
347{
348 if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
349
350
351
352 TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
353}
354
355static void
356tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
357{
358 if (inet_rsk(req)->ecn_ok)
359 th->ece = 1;
360}
361
/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
365static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
366 struct tcphdr *th, int tcp_header_len)
367{
368 struct tcp_sock *tp = tcp_sk(sk);
369
370 if (tp->ecn_flags & TCP_ECN_OK) {
371
372 if (skb->len != tcp_header_len &&
373 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
374 INET_ECN_xmit(sk);
375 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
376 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
377 th->cwr = 1;
378 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
379 }
380 } else if (!tcp_ca_needs_ecn(sk)) {
381
382 INET_ECN_dontxmit(sk);
383 }
384 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
385 th->ece = 1;
386 }
387}
388
/* Constructs common control bits of a non-data skb.  If SYN/FIN is
 * present, auto increment end seqno.
 */
392static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
393{
394 skb->ip_summed = CHECKSUM_PARTIAL;
395
396 TCP_SKB_CB(skb)->tcp_flags = flags;
397 TCP_SKB_CB(skb)->sacked = 0;
398
399 tcp_skb_pcount_set(skb, 1);
400
401 TCP_SKB_CB(skb)->seq = seq;
402 if (flags & (TCPHDR_SYN | TCPHDR_FIN))
403 seq++;
404 TCP_SKB_CB(skb)->end_seq = seq;
405}
406
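/* We are in "urgent mode" while there is unacknowledged urgent data,
 * i.e. snd_up has been advanced past snd_una.
 */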
407static inline bool tcp_urg_mode(const struct tcp_sock *tp)
408{
409 return tp->snd_una != tp->snd_up;
410}
411
412#define OPTION_SACK_ADVERTISE (1 << 0)
413#define OPTION_TS (1 << 1)
414#define OPTION_MD5 (1 << 2)
415#define OPTION_WSCALE (1 << 3)
416#define OPTION_FAST_OPEN_COOKIE (1 << 8)
417#define OPTION_SMC (1 << 9)
418#define OPTION_MPTCP (1 << 10)
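/* These bits are collected in tcp_out_options.options by the option
 * sizing helpers below and then consumed by tcp_options_write().
 */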
419
420static void smc_options_write(__be32 *ptr, u16 *options)
421{
422#if IS_ENABLED(CONFIG_SMC)
423 if (static_branch_unlikely(&tcp_have_smc)) {
424 if (unlikely(OPTION_SMC & *options)) {
425 *ptr++ = htonl((TCPOPT_NOP << 24) |
426 (TCPOPT_NOP << 16) |
427 (TCPOPT_EXP << 8) |
428 (TCPOLEN_EXP_SMC_BASE));
429 *ptr++ = htonl(TCPOPT_SMC_MAGIC);
430 }
431 }
432#endif
433}
434
435struct tcp_out_options {
436 u16 options;
437 u16 mss;
438 u8 ws;
439 u8 num_sack_blocks;
440 u8 hash_size;
441 u8 bpf_opt_len;
442 __u8 *hash_location;
443 __u32 tsval, tsecr;
444 struct tcp_fastopen_cookie *fastopen_cookie;
445 struct mptcp_out_options mptcp;
446};
447
448static void mptcp_options_write(__be32 *ptr, const struct tcp_sock *tp,
449 struct tcp_out_options *opts)
450{
451#if IS_ENABLED(CONFIG_MPTCP)
452 if (unlikely(OPTION_MPTCP & opts->options))
453 mptcp_write_options(ptr, tp, &opts->mptcp);
454#endif
455}
456
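/* Hooks for BPF_PROG_TYPE_SOCK_OPS programs that add their own TCP header
 * options: bpf_skops_hdr_opt_len() reserves space while options are being
 * sized, bpf_skops_write_hdr_opt() writes them once the header is built.
 * Without CONFIG_CGROUP_BPF both collapse to empty stubs.
 */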
457#ifdef CONFIG_CGROUP_BPF
458static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
459 enum tcp_synack_type synack_type)
460{
461 if (unlikely(!skb))
462 return BPF_WRITE_HDR_TCP_CURRENT_MSS;
463
464 if (unlikely(synack_type == TCP_SYNACK_COOKIE))
465 return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
466
467 return 0;
468}
469
470
471static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
472 struct request_sock *req,
473 struct sk_buff *syn_skb,
474 enum tcp_synack_type synack_type,
475 struct tcp_out_options *opts,
476 unsigned int *remaining)
477{
478 struct bpf_sock_ops_kern sock_ops;
479 int err;
480
481 if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
482 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
483 !*remaining)
484 return;
485
486
487
488
489 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
490
491 sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
492
493 if (req) {
494
495
496
497
498
499
500
501
502
503
504
505
506
507 sock_ops.sk = (struct sock *)req;
508 sock_ops.syn_skb = syn_skb;
509 } else {
510 sock_owned_by_me(sk);
511
512 sock_ops.is_fullsock = 1;
513 sock_ops.sk = sk;
514 }
515
516 sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
517 sock_ops.remaining_opt_len = *remaining;
518
519 if (skb)
520 bpf_skops_init_skb(&sock_ops, skb, 0);
521
522 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
523
524 if (err || sock_ops.remaining_opt_len == *remaining)
525 return;
526
527 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
528
529 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
530
531 *remaining -= opts->bpf_opt_len;
532}
533
534static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
535 struct request_sock *req,
536 struct sk_buff *syn_skb,
537 enum tcp_synack_type synack_type,
538 struct tcp_out_options *opts)
539{
540 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
541 struct bpf_sock_ops_kern sock_ops;
542 int err;
543
544 if (likely(!max_opt_len))
545 return;
546
547 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
548
549 sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
550
551 if (req) {
552 sock_ops.sk = (struct sock *)req;
553 sock_ops.syn_skb = syn_skb;
554 } else {
555 sock_owned_by_me(sk);
556
557 sock_ops.is_fullsock = 1;
558 sock_ops.sk = sk;
559 }
560
561 sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
562 sock_ops.remaining_opt_len = max_opt_len;
563 first_opt_off = tcp_hdrlen(skb) - max_opt_len;
564 bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
565
566 err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
567
568 if (err)
569 nr_written = 0;
570 else
571 nr_written = max_opt_len - sock_ops.remaining_opt_len;
572
573 if (nr_written < max_opt_len)
574 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
575 max_opt_len - nr_written);
576}
577#else
578static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
579 struct request_sock *req,
580 struct sk_buff *syn_skb,
581 enum tcp_synack_type synack_type,
582 struct tcp_out_options *opts,
583 unsigned int *remaining)
584{
585}
586
587static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
588 struct request_sock *req,
589 struct sk_buff *syn_skb,
590 enum tcp_synack_type synack_type,
591 struct tcp_out_options *opts)
592{
593}
594#endif
595
/* Write previously computed TCP options to the packet.
 *
 * Beware: something in the Internet is very sensitive to the ordering of
 * TCP options; this was learned the hard way.  From an inter-operability
 * point of view we are stuck with the order used below (MD5, MSS,
 * timestamps / SACK-permitted, window scale, SACK blocks, Fast Open, SMC,
 * MPTCP), so do not change it lightly.  At least SACK_PERM placed as the
 * first option is known to lead to a disaster.
 */
609static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
610 struct tcp_out_options *opts)
611{
612 u16 options = opts->options;
613
614 if (unlikely(OPTION_MD5 & options)) {
615 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
616 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
617
618 opts->hash_location = (__u8 *)ptr;
619 ptr += 4;
620 }
621
622 if (unlikely(opts->mss)) {
623 *ptr++ = htonl((TCPOPT_MSS << 24) |
624 (TCPOLEN_MSS << 16) |
625 opts->mss);
626 }
627
628 if (likely(OPTION_TS & options)) {
629 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
630 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
631 (TCPOLEN_SACK_PERM << 16) |
632 (TCPOPT_TIMESTAMP << 8) |
633 TCPOLEN_TIMESTAMP);
634 options &= ~OPTION_SACK_ADVERTISE;
635 } else {
636 *ptr++ = htonl((TCPOPT_NOP << 24) |
637 (TCPOPT_NOP << 16) |
638 (TCPOPT_TIMESTAMP << 8) |
639 TCPOLEN_TIMESTAMP);
640 }
641 *ptr++ = htonl(opts->tsval);
642 *ptr++ = htonl(opts->tsecr);
643 }
644
645 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
646 *ptr++ = htonl((TCPOPT_NOP << 24) |
647 (TCPOPT_NOP << 16) |
648 (TCPOPT_SACK_PERM << 8) |
649 TCPOLEN_SACK_PERM);
650 }
651
652 if (unlikely(OPTION_WSCALE & options)) {
653 *ptr++ = htonl((TCPOPT_NOP << 24) |
654 (TCPOPT_WINDOW << 16) |
655 (TCPOLEN_WINDOW << 8) |
656 opts->ws);
657 }
658
659 if (unlikely(opts->num_sack_blocks)) {
660 struct tcp_sack_block *sp = tp->rx_opt.dsack ?
661 tp->duplicate_sack : tp->selective_acks;
662 int this_sack;
663
664 *ptr++ = htonl((TCPOPT_NOP << 24) |
665 (TCPOPT_NOP << 16) |
666 (TCPOPT_SACK << 8) |
667 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
668 TCPOLEN_SACK_PERBLOCK)));
669
670 for (this_sack = 0; this_sack < opts->num_sack_blocks;
671 ++this_sack) {
672 *ptr++ = htonl(sp[this_sack].start_seq);
673 *ptr++ = htonl(sp[this_sack].end_seq);
674 }
675
676 tp->rx_opt.dsack = 0;
677 }
678
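	/* Fast Open cookie: either the RFC 7413 option (TCPOPT_FASTOPEN) or
	 * the experimental option (TCPOPT_EXP plus the Fast Open magic),
	 * padded with NOPs to a 4-byte boundary when needed.
	 */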
679 if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
680 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
681 u8 *p = (u8 *)ptr;
682 u32 len;
683
684 if (foc->exp) {
685 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
686 *ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
687 TCPOPT_FASTOPEN_MAGIC);
688 p += TCPOLEN_EXP_FASTOPEN_BASE;
689 } else {
690 len = TCPOLEN_FASTOPEN_BASE + foc->len;
691 *p++ = TCPOPT_FASTOPEN;
692 *p++ = len;
693 }
694
695 memcpy(p, foc->val, foc->len);
696 if ((len & 3) == 2) {
697 p[foc->len] = TCPOPT_NOP;
698 p[foc->len + 1] = TCPOPT_NOP;
699 }
700 ptr += (len + 3) >> 2;
701 }
702
703 smc_options_write(ptr, &options);
704
705 mptcp_options_write(ptr, tp, opts);
706}
707
708static void smc_set_option(const struct tcp_sock *tp,
709 struct tcp_out_options *opts,
710 unsigned int *remaining)
711{
712#if IS_ENABLED(CONFIG_SMC)
713 if (static_branch_unlikely(&tcp_have_smc)) {
714 if (tp->syn_smc) {
715 if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
716 opts->options |= OPTION_SMC;
717 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
718 }
719 }
720 }
721#endif
722}
723
724static void smc_set_option_cond(const struct tcp_sock *tp,
725 const struct inet_request_sock *ireq,
726 struct tcp_out_options *opts,
727 unsigned int *remaining)
728{
729#if IS_ENABLED(CONFIG_SMC)
730 if (static_branch_unlikely(&tcp_have_smc)) {
731 if (tp->syn_smc && ireq->smc_ok) {
732 if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
733 opts->options |= OPTION_SMC;
734 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
735 }
736 }
737 }
738#endif
739}
740
741static void mptcp_set_option_cond(const struct request_sock *req,
742 struct tcp_out_options *opts,
743 unsigned int *remaining)
744{
745 if (rsk_is_mptcp(req)) {
746 unsigned int size;
747
748 if (mptcp_synack_options(req, &size, &opts->mptcp)) {
749 if (*remaining >= size) {
750 opts->options |= OPTION_MPTCP;
751 *remaining -= size;
752 }
753 }
754 }
755}
756
/* Compute TCP options for SYN packets.  This is not the final
 * network wire format yet.
 */
760static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
761 struct tcp_out_options *opts,
762 struct tcp_md5sig_key **md5)
763{
764 struct tcp_sock *tp = tcp_sk(sk);
765 unsigned int remaining = MAX_TCP_OPTION_SPACE;
766 struct tcp_fastopen_request *fastopen = tp->fastopen_req;
767
768 *md5 = NULL;
769#ifdef CONFIG_TCP_MD5SIG
770 if (static_branch_unlikely(&tcp_md5_needed) &&
771 rcu_access_pointer(tp->md5sig_info)) {
772 *md5 = tp->af_specific->md5_lookup(sk, sk);
773 if (*md5) {
774 opts->options |= OPTION_MD5;
775 remaining -= TCPOLEN_MD5SIG_ALIGNED;
776 }
777 }
778#endif
779
	/* We always get an MSS option.  The option bytes which will be seen
	 * in normal data packets should timestamps be used, must be in the
	 * MSS advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when
	 * we should, and thus we won't abide by the delayed ACK rules
	 * correctly.  SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
789 opts->mss = tcp_advertise_mss(sk);
790 remaining -= TCPOLEN_MSS_ALIGNED;
791
792 if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
793 opts->options |= OPTION_TS;
794 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
795 opts->tsecr = tp->rx_opt.ts_recent;
796 remaining -= TCPOLEN_TSTAMP_ALIGNED;
797 }
798 if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
799 opts->ws = tp->rx_opt.rcv_wscale;
800 opts->options |= OPTION_WSCALE;
801 remaining -= TCPOLEN_WSCALE_ALIGNED;
802 }
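	/* SACK-permitted shares a 4-byte aligned block with timestamps in
	 * tcp_options_write(), so it only costs option space of its own
	 * when timestamps are not in use.
	 */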
803 if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
804 opts->options |= OPTION_SACK_ADVERTISE;
805 if (unlikely(!(OPTION_TS & opts->options)))
806 remaining -= TCPOLEN_SACKPERM_ALIGNED;
807 }
808
809 if (fastopen && fastopen->cookie.len >= 0) {
810 u32 need = fastopen->cookie.len;
811
812 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
813 TCPOLEN_FASTOPEN_BASE;
814 need = (need + 3) & ~3U;
815 if (remaining >= need) {
816 opts->options |= OPTION_FAST_OPEN_COOKIE;
817 opts->fastopen_cookie = &fastopen->cookie;
818 remaining -= need;
819 tp->syn_fastopen = 1;
820 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
821 }
822 }
823
824 smc_set_option(tp, opts, &remaining);
825
826 if (sk_is_mptcp(sk)) {
827 unsigned int size;
828
829 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
830 opts->options |= OPTION_MPTCP;
831 remaining -= size;
832 }
833 }
834
835 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
836
837 return MAX_TCP_OPTION_SPACE - remaining;
838}
839
840
841static unsigned int tcp_synack_options(const struct sock *sk,
842 struct request_sock *req,
843 unsigned int mss, struct sk_buff *skb,
844 struct tcp_out_options *opts,
845 const struct tcp_md5sig_key *md5,
846 struct tcp_fastopen_cookie *foc,
847 enum tcp_synack_type synack_type,
848 struct sk_buff *syn_skb)
849{
850 struct inet_request_sock *ireq = inet_rsk(req);
851 unsigned int remaining = MAX_TCP_OPTION_SPACE;
852
853#ifdef CONFIG_TCP_MD5SIG
854 if (md5) {
855 opts->options |= OPTION_MD5;
856 remaining -= TCPOLEN_MD5SIG_ALIGNED;
857
		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options.  There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
863 if (synack_type != TCP_SYNACK_COOKIE)
864 ireq->tstamp_ok &= !ireq->sack_ok;
865 }
866#endif
867
868
869 opts->mss = mss;
870 remaining -= TCPOLEN_MSS_ALIGNED;
871
872 if (likely(ireq->wscale_ok)) {
873 opts->ws = ireq->rcv_wscale;
874 opts->options |= OPTION_WSCALE;
875 remaining -= TCPOLEN_WSCALE_ALIGNED;
876 }
877 if (likely(ireq->tstamp_ok)) {
878 opts->options |= OPTION_TS;
879 opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
880 opts->tsecr = req->ts_recent;
881 remaining -= TCPOLEN_TSTAMP_ALIGNED;
882 }
883 if (likely(ireq->sack_ok)) {
884 opts->options |= OPTION_SACK_ADVERTISE;
885 if (unlikely(!ireq->tstamp_ok))
886 remaining -= TCPOLEN_SACKPERM_ALIGNED;
887 }
888 if (foc != NULL && foc->len >= 0) {
889 u32 need = foc->len;
890
891 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
892 TCPOLEN_FASTOPEN_BASE;
893 need = (need + 3) & ~3U;
894 if (remaining >= need) {
895 opts->options |= OPTION_FAST_OPEN_COOKIE;
896 opts->fastopen_cookie = foc;
897 remaining -= need;
898 }
899 }
900
901 mptcp_set_option_cond(req, opts, &remaining);
902
903 smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
904
905 bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
906 synack_type, opts, &remaining);
907
908 return MAX_TCP_OPTION_SPACE - remaining;
909}
910
/* Compute TCP options for ESTABLISHED sockets.  This is not the
 * final network wire format yet.
 */
914static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
915 struct tcp_out_options *opts,
916 struct tcp_md5sig_key **md5)
917{
918 struct tcp_sock *tp = tcp_sk(sk);
919 unsigned int size = 0;
920 unsigned int eff_sacks;
921
922 opts->options = 0;
923
924 *md5 = NULL;
925#ifdef CONFIG_TCP_MD5SIG
926 if (static_branch_unlikely(&tcp_md5_needed) &&
927 rcu_access_pointer(tp->md5sig_info)) {
928 *md5 = tp->af_specific->md5_lookup(sk, sk);
929 if (*md5) {
930 opts->options |= OPTION_MD5;
931 size += TCPOLEN_MD5SIG_ALIGNED;
932 }
933 }
934#endif
935
936 if (likely(tp->rx_opt.tstamp_ok)) {
937 opts->options |= OPTION_TS;
938 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
939 opts->tsecr = tp->rx_opt.ts_recent;
940 size += TCPOLEN_TSTAMP_ALIGNED;
941 }
942
	/* MPTCP options have precedence over SACK for the limited TCP
	 * option space because an MPTCP connection would be forced to
	 * fall back to regular TCP if a required multipath option is
	 * missing.  SACK still gets whatever room is left over.
	 */
949 if (sk_is_mptcp(sk)) {
950 unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
951 unsigned int opt_size = 0;
952
953 if (mptcp_established_options(sk, skb, &opt_size, remaining,
954 &opts->mptcp)) {
955 opts->options |= OPTION_MPTCP;
956 size += opt_size;
957 }
958 }
959
960 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
961 if (unlikely(eff_sacks)) {
962 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
963 if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
964 TCPOLEN_SACK_PERBLOCK))
965 return size;
966
967 opts->num_sack_blocks =
968 min_t(unsigned int, eff_sacks,
969 (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
970 TCPOLEN_SACK_PERBLOCK);
971
972 size += TCPOLEN_SACK_BASE_ALIGNED +
973 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
974 }
975
976 if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
977 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
978 unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
979
980 bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
981
982 size = MAX_TCP_OPTION_SPACE - remaining;
983 }
984
985 return size;
986}
987
/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc + device) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * Its important tcp_wfree() can be replaced by sock_wfree() in the event
 * the skb needs to be reallocated in a driver.
 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
1003struct tsq_tasklet {
1004 struct tasklet_struct tasklet;
1005 struct list_head head;
1006};
1007static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
1008
1009static void tcp_tsq_write(struct sock *sk)
1010{
1011 if ((1 << sk->sk_state) &
1012 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1013 TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
1014 struct tcp_sock *tp = tcp_sk(sk);
1015
1016 if (tp->lost_out > tp->retrans_out &&
1017 tp->snd_cwnd > tcp_packets_in_flight(tp)) {
1018 tcp_mstamp_refresh(tp);
1019 tcp_xmit_retransmit_queue(sk);
1020 }
1021
1022 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1023 0, GFP_ATOMIC);
1024 }
1025}
1026
1027static void tcp_tsq_handler(struct sock *sk)
1028{
1029 bh_lock_sock(sk);
1030 if (!sock_owned_by_user(sk))
1031 tcp_tsq_write(sk);
1032 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
1033 sock_hold(sk);
1034 bh_unlock_sock(sk);
1035}
1036
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head because tcp_wfree() might
 * interrupt us (non NAPI drivers).
 */
1042static void tcp_tasklet_func(struct tasklet_struct *t)
1043{
1044 struct tsq_tasklet *tsq = from_tasklet(tsq, t, tasklet);
1045 LIST_HEAD(list);
1046 unsigned long flags;
1047 struct list_head *q, *n;
1048 struct tcp_sock *tp;
1049 struct sock *sk;
1050
1051 local_irq_save(flags);
1052 list_splice_init(&tsq->head, &list);
1053 local_irq_restore(flags);
1054
1055 list_for_each_safe(q, n, &list) {
1056 tp = list_entry(q, struct tcp_sock, tsq_node);
1057 list_del(&tp->tsq_node);
1058
1059 sk = (struct sock *)tp;
1060 smp_mb__before_atomic();
1061 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
1062
1063 tcp_tsq_handler(sk);
1064 sk_free(sk);
1065 }
1066}
1067
1068#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \
1069 TCPF_WRITE_TIMER_DEFERRED | \
1070 TCPF_DELACK_TIMER_DEFERRED | \
1071 TCPF_MTU_REDUCED_DEFERRED)
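/* Bits that may be set from BH context while the socket is owned by the
 * user; tcp_release_cb() picks them up when the lock is released.
 */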
1072
/**
 * tcp_release_cb - release_sock() callback
 * @sk: socket
 *
 * Called from release_sock() to perform protocol-dependent actions that
 * were deferred (via sk_tsq_flags) while the socket was owned by user.
 */
1079void tcp_release_cb(struct sock *sk)
1080{
1081 unsigned long flags, nflags;
1082
1083
1084 do {
1085 flags = sk->sk_tsq_flags;
1086 if (!(flags & TCP_DEFERRED_ALL))
1087 return;
1088 nflags = flags & ~TCP_DEFERRED_ALL;
1089 } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
1090
1091 if (flags & TCPF_TSQ_DEFERRED) {
1092 tcp_tsq_write(sk);
1093 __sock_put(sk);
1094 }
1095
	/* Here begins the tricky part:
	 * We are called from release_sock() with
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we keep BH disabled but release socket ownership early.
	 */
1104 sock_release_ownership(sk);
1105
1106 if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1107 tcp_write_timer_handler(sk);
1108 __sock_put(sk);
1109 }
1110 if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1111 tcp_delack_timer_handler(sk);
1112 __sock_put(sk);
1113 }
1114 if (flags & TCPF_MTU_REDUCED_DEFERRED) {
1115 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1116 __sock_put(sk);
1117 }
1118}
1119EXPORT_SYMBOL(tcp_release_cb);
1120
1121void __init tcp_tasklet_init(void)
1122{
1123 int i;
1124
1125 for_each_possible_cpu(i) {
1126 struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
1127
1128 INIT_LIST_HEAD(&tsq->head);
1129 tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
1130 }
1131}
1132
/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
1138void tcp_wfree(struct sk_buff *skb)
1139{
1140 struct sock *sk = skb->sk;
1141 struct tcp_sock *tp = tcp_sk(sk);
1142 unsigned long flags, nval, oval;
1143
	/* Keep one reference on sk_wmem_alloc.
	 * It will be released by sk_free() from here or tcp_tasklet_func().
	 */
1147 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
1148
	/* If this softirq is serviced by ksoftirqd, we are likely under
	 * stress.  Wait until our queues (qdisc + devices) are drained.
	 * This gives:
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - a chance for an incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will eventually be set)
	 */
1156 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
1157 goto out;
1158
1159 for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
1160 struct tsq_tasklet *tsq;
1161 bool empty;
1162
1163 if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1164 goto out;
1165
1166 nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
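		/* Flip THROTTLED -> QUEUED atomically; if another CPU changed
		 * sk_tsq_flags under us, cmpxchg fails and we retry the loop.
		 */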
1167 nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
1168 if (nval != oval)
1169 continue;
1170
1171
1172 local_irq_save(flags);
1173 tsq = this_cpu_ptr(&tsq_tasklet);
1174 empty = list_empty(&tsq->head);
1175 list_add(&tp->tsq_node, &tsq->head);
1176 if (empty)
1177 tasklet_schedule(&tsq->tasklet);
1178 local_irq_restore(flags);
1179 return;
1180 }
1181out:
1182 sk_free(sk);
1183}
1184
1185
1186
1187
1188enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1189{
1190 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1191 struct sock *sk = (struct sock *)tp;
1192
1193 tcp_tsq_handler(sk);
1194 sock_put(sk);
1195
1196 return HRTIMER_NORESTART;
1197}
1198
1199static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1200 u64 prior_wstamp)
1201{
1202 struct tcp_sock *tp = tcp_sk(sk);
1203
1204 if (sk->sk_pacing_status != SK_PACING_NONE) {
1205 unsigned long rate = sk->sk_pacing_rate;
1206
1207
1208
1209
1210
1211 if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1212 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1213 u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1214
1215
1216 len_ns -= min_t(u64, len_ns / 2, credit);
1217 tp->tcp_wstamp_ns += len_ns;
1218 }
1219 }
1220 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1221}
1222
1223INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1224INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1225INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1226
/* This routine actually transmits TCP packets queued in by
 * tcp_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
1238static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1239 int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1240{
1241 const struct inet_connection_sock *icsk = inet_csk(sk);
1242 struct inet_sock *inet;
1243 struct tcp_sock *tp;
1244 struct tcp_skb_cb *tcb;
1245 struct tcp_out_options opts;
1246 unsigned int tcp_options_size, tcp_header_size;
1247 struct sk_buff *oskb = NULL;
1248 struct tcp_md5sig_key *md5;
1249 struct tcphdr *th;
1250 u64 prior_wstamp;
1251 int err;
1252
1253 BUG_ON(!skb || !tcp_skb_pcount(skb));
1254 tp = tcp_sk(sk);
1255 prior_wstamp = tp->tcp_wstamp_ns;
1256 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1257 skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
1258 if (clone_it) {
1259 TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
1260 - tp->snd_una;
1261 oskb = skb;
1262
1263 tcp_skb_tsorted_save(oskb) {
1264 if (unlikely(skb_cloned(oskb)))
1265 skb = pskb_copy(oskb, gfp_mask);
1266 else
1267 skb = skb_clone(oskb, gfp_mask);
1268 } tcp_skb_tsorted_restore(oskb);
1269
1270 if (unlikely(!skb))
1271 return -ENOBUFS;
1272
1273
1274
1275 skb->dev = NULL;
1276 }
1277
1278 inet = inet_sk(sk);
1279 tcb = TCP_SKB_CB(skb);
1280 memset(&opts, 0, sizeof(opts));
1281
1282 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
1283 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
1284 } else {
1285 tcp_options_size = tcp_established_options(sk, skb, &opts,
1286 &md5);
1287
1288
1289
1290
1291
1292
1293
1294
1295 if (tcp_skb_pcount(skb) > 1)
1296 tcb->tcp_flags |= TCPHDR_PSH;
1297 }
1298 tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
1299
	/* If no packet is in qdisc/device queue, then allow XPS to select
	 * another queue.  We can be called from tcp_tsq_handler()
	 * which holds one reference to sk.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
1307 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
1308
	/* If we had to use memory reserve to allocate this skb,
	 * this might cause drops if the packet is looped back:
	 * the other socket might not have SOCK_MEMALLOC.
	 * Packets not looped back do not care about pfmemalloc.
	 */
1314 skb->pfmemalloc = 0;
1315
1316 skb_push(skb, tcp_header_size);
1317 skb_reset_transport_header(skb);
1318
1319 skb_orphan(skb);
1320 skb->sk = sk;
1321 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1322 refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1323
1324 skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1325
1326
1327 th = (struct tcphdr *)skb->data;
1328 th->source = inet->inet_sport;
1329 th->dest = inet->inet_dport;
1330 th->seq = htonl(tcb->seq);
1331 th->ack_seq = htonl(rcv_nxt);
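	/* Fold the data offset (header length in 32-bit words) and the TCP
	 * flag bits into a single 16-bit store at byte offset 12.
	 */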
1332 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
1333 tcb->tcp_flags);
1334
1335 th->check = 0;
1336 th->urg_ptr = 0;
1337
1338
1339 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
1340 if (before(tp->snd_up, tcb->seq + 0x10000)) {
1341 th->urg_ptr = htons(tp->snd_up - tcb->seq);
1342 th->urg = 1;
1343 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
1344 th->urg_ptr = htons(0xFFFF);
1345 th->urg = 1;
1346 }
1347 }
1348
1349 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1350 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1351 th->window = htons(tcp_select_window(sk));
1352 tcp_ecn_send(sk, skb, th, tcp_header_size);
1353 } else {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
1357 th->window = htons(min(tp->rcv_wnd, 65535U));
1358 }
1359
1360 tcp_options_write((__be32 *)(th + 1), tp, &opts);
1361
1362#ifdef CONFIG_TCP_MD5SIG
1363
1364 if (md5) {
1365 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1366 tp->af_specific->calc_md5_hash(opts.hash_location,
1367 md5, sk, skb);
1368 }
1369#endif
1370
1371
1372 bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1373
1374 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1375 tcp_v6_send_check, tcp_v4_send_check,
1376 sk, skb);
1377
1378 if (likely(tcb->tcp_flags & TCPHDR_ACK))
1379 tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
1380
1381 if (skb->len != tcp_header_size) {
1382 tcp_event_data_sent(tp, sk);
1383 tp->data_segs_out += tcp_skb_pcount(skb);
1384 tp->bytes_sent += skb->len - tcp_header_size;
1385 }
1386
1387 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1388 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1389 tcp_skb_pcount(skb));
1390
1391 tp->segs_out += tcp_skb_pcount(skb);
1392 skb_set_hash_from_sk(skb, sk);
1393
1394 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1395 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1396
1397
1398
1399
1400 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1401 sizeof(struct inet6_skb_parm)));
1402
1403 tcp_add_tx_delay(skb, tp);
1404
1405 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
1406 inet6_csk_xmit, ip_queue_xmit,
1407 sk, skb, &inet->cork.fl);
1408
1409 if (unlikely(err > 0)) {
1410 tcp_enter_cwr(sk);
1411 err = net_xmit_eval(err);
1412 }
1413 if (!err && oskb) {
1414 tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1415 tcp_rate_skb_sent(sk, oskb);
1416 }
1417 return err;
1418}
1419
1420static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1421 gfp_t gfp_mask)
1422{
1423 return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1424 tcp_sk(sk)->rcv_nxt);
1425}
1426
/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked, do not forget
 * tcp_push_pending_frames, otherwise this socket can stall.
 */
1432static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1433{
1434 struct tcp_sock *tp = tcp_sk(sk);
1435
1436
1437 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1438 __skb_header_release(skb);
1439 tcp_add_write_queue_tail(sk, skb);
1440 sk_wmem_queued_add(sk, skb->truesize);
1441 sk_mem_charge(sk, skb->truesize);
1442}
1443
1444
1445static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1446{
1447 if (skb->len <= mss_now) {
1448
1449
1450
1451 tcp_skb_pcount_set(skb, 1);
1452 TCP_SKB_CB(skb)->tcp_gso_size = 0;
1453 } else {
1454 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1455 TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1456 }
1457}
1458
/* Pcount in the middle of the write queue got changed, we need to do
 * various tweaks to fix the counters.
 */
1462static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1463{
1464 struct tcp_sock *tp = tcp_sk(sk);
1465
1466 tp->packets_out -= decr;
1467
1468 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1469 tp->sacked_out -= decr;
1470 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1471 tp->retrans_out -= decr;
1472 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1473 tp->lost_out -= decr;
1474
1475
1476 if (tcp_is_reno(tp) && decr > 0)
1477 tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1478
1479 if (tp->lost_skb_hint &&
1480 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1481 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1482 tp->lost_cnt_hint -= decr;
1483
1484 tcp_verify_left_out(tp);
1485}
1486
1487static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
1488{
1489 return TCP_SKB_CB(skb)->txstamp_ack ||
1490 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
1491}
1492
1493static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1494{
1495 struct skb_shared_info *shinfo = skb_shinfo(skb);
1496
1497 if (unlikely(tcp_has_tx_tstamp(skb)) &&
1498 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1499 struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1500 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1501
1502 shinfo->tx_flags &= ~tsflags;
1503 shinfo2->tx_flags |= tsflags;
1504 swap(shinfo->tskey, shinfo2->tskey);
1505 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1506 TCP_SKB_CB(skb)->txstamp_ack = 0;
1507 }
1508}
1509
1510static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1511{
1512 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1513 TCP_SKB_CB(skb)->eor = 0;
1514}
1515
1516
1517static void tcp_insert_write_queue_after(struct sk_buff *skb,
1518 struct sk_buff *buff,
1519 struct sock *sk,
1520 enum tcp_queue tcp_queue)
1521{
1522 if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
1523 __skb_queue_after(&sk->sk_write_queue, skb, buff);
1524 else
1525 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
1526}
1527
/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently.
 * Remember, these are still headerless SKBs at this point.
 */
1533int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1534 struct sk_buff *skb, u32 len,
1535 unsigned int mss_now, gfp_t gfp)
1536{
1537 struct tcp_sock *tp = tcp_sk(sk);
1538 struct sk_buff *buff;
1539 int nsize, old_factor;
1540 long limit;
1541 int nlen;
1542 u8 flags;
1543
1544 if (WARN_ON(len > skb->len))
1545 return -EINVAL;
1546
1547 nsize = skb_headlen(skb) - len;
1548 if (nsize < 0)
1549 nsize = 0;
1550
	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
	 * We need some allowance to not penalize applications setting small
	 * SO_SNDBUF values.
	 * Also allow the first and last skb in the retransmit queue to be
	 * split.
	 */
1556 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1557 if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1558 tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1559 skb != tcp_rtx_queue_head(sk) &&
1560 skb != tcp_rtx_queue_tail(sk))) {
1561 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1562 return -ENOMEM;
1563 }
1564
1565 if (skb_unclone(skb, gfp))
1566 return -ENOMEM;
1567
1568
1569 buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
1570 if (!buff)
1571 return -ENOMEM;
1572 skb_copy_decrypted(buff, skb);
1573 mptcp_skb_ext_copy(buff, skb);
1574
1575 sk_wmem_queued_add(sk, buff->truesize);
1576 sk_mem_charge(sk, buff->truesize);
1577 nlen = skb->len - len - nsize;
1578 buff->truesize += nlen;
1579 skb->truesize -= nlen;
1580
1581
1582 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1583 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1584 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1585
1586
1587 flags = TCP_SKB_CB(skb)->tcp_flags;
1588 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1589 TCP_SKB_CB(buff)->tcp_flags = flags;
1590 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1591 tcp_skb_fragment_eor(skb, buff);
1592
1593 skb_split(skb, buff, len);
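	/* skb_split() has moved everything past @len, both linear and paged
	 * data, from skb into buff.
	 */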
1594
1595 buff->ip_summed = CHECKSUM_PARTIAL;
1596
1597 buff->tstamp = skb->tstamp;
1598 tcp_fragment_tstamp(skb, buff);
1599
1600 old_factor = tcp_skb_pcount(skb);
1601
1602
1603 tcp_set_skb_tso_segs(skb, mss_now);
1604 tcp_set_skb_tso_segs(buff, mss_now);
1605
1606
1607 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1608
1609
1610
1611
1612 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1613 int diff = old_factor - tcp_skb_pcount(skb) -
1614 tcp_skb_pcount(buff);
1615
1616 if (diff)
1617 tcp_adjust_pcount(sk, skb, diff);
1618 }
1619
1620
1621 __skb_header_release(buff);
1622 tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1623 if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1624 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
1625
1626 return 0;
1627}
1628
/* This is similar to __pskb_pull_tail().  The difference is that pulled
 * data is not copied, but immediately discarded.
 */
1632static int __pskb_trim_head(struct sk_buff *skb, int len)
1633{
1634 struct skb_shared_info *shinfo;
1635 int i, k, eat;
1636
1637 eat = min_t(int, len, skb_headlen(skb));
1638 if (eat) {
1639 __skb_pull(skb, eat);
1640 len -= eat;
1641 if (!len)
1642 return 0;
1643 }
1644 eat = len;
1645 k = 0;
1646 shinfo = skb_shinfo(skb);
1647 for (i = 0; i < shinfo->nr_frags; i++) {
1648 int size = skb_frag_size(&shinfo->frags[i]);
1649
1650 if (size <= eat) {
1651 skb_frag_unref(skb, i);
1652 eat -= size;
1653 } else {
1654 shinfo->frags[k] = shinfo->frags[i];
1655 if (eat) {
1656 skb_frag_off_add(&shinfo->frags[k], eat);
1657 skb_frag_size_sub(&shinfo->frags[k], eat);
1658 eat = 0;
1659 }
1660 k++;
1661 }
1662 }
1663 shinfo->nr_frags = k;
1664
1665 skb->data_len -= len;
1666 skb->len = skb->data_len;
1667 return len;
1668}
1669
1670
1671int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1672{
1673 u32 delta_truesize;
1674
1675 if (skb_unclone(skb, GFP_ATOMIC))
1676 return -ENOMEM;
1677
1678 delta_truesize = __pskb_trim_head(skb, len);
1679
1680 TCP_SKB_CB(skb)->seq += len;
1681 skb->ip_summed = CHECKSUM_PARTIAL;
1682
1683 if (delta_truesize) {
1684 skb->truesize -= delta_truesize;
1685 sk_wmem_queued_add(sk, -delta_truesize);
1686 sk_mem_uncharge(sk, delta_truesize);
1687 }
1688
1689
1690 if (tcp_skb_pcount(skb) > 1)
1691 tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1692
1693 return 0;
1694}
1695
1696
1697static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1698{
1699 const struct tcp_sock *tp = tcp_sk(sk);
1700 const struct inet_connection_sock *icsk = inet_csk(sk);
1701 int mss_now;
1702
1703
1704
1705
1706 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1707
1708
1709 if (icsk->icsk_af_ops->net_frag_header_len) {
1710 const struct dst_entry *dst = __sk_dst_get(sk);
1711
1712 if (dst && dst_allfrag(dst))
1713 mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1714 }
1715
1716
1717 if (mss_now > tp->rx_opt.mss_clamp)
1718 mss_now = tp->rx_opt.mss_clamp;
1719
1720
1721 mss_now -= icsk->icsk_ext_hdr_len;
1722
1723
1724 mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
1725 return mss_now;
1726}
1727
1728
1729int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1730{
1731
1732 return __tcp_mtu_to_mss(sk, pmtu) -
1733 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1734}
1735EXPORT_SYMBOL(tcp_mtu_to_mss);
1736
1737
1738int tcp_mss_to_mtu(struct sock *sk, int mss)
1739{
1740 const struct tcp_sock *tp = tcp_sk(sk);
1741 const struct inet_connection_sock *icsk = inet_csk(sk);
1742 int mtu;
1743
1744 mtu = mss +
1745 tp->tcp_header_len +
1746 icsk->icsk_ext_hdr_len +
1747 icsk->icsk_af_ops->net_header_len;
1748
1749
1750 if (icsk->icsk_af_ops->net_frag_header_len) {
1751 const struct dst_entry *dst = __sk_dst_get(sk);
1752
1753 if (dst && dst_allfrag(dst))
1754 mtu += icsk->icsk_af_ops->net_frag_header_len;
1755 }
1756 return mtu;
1757}
1758EXPORT_SYMBOL(tcp_mss_to_mtu);
1759
1760
1761void tcp_mtup_init(struct sock *sk)
1762{
1763 struct tcp_sock *tp = tcp_sk(sk);
1764 struct inet_connection_sock *icsk = inet_csk(sk);
1765 struct net *net = sock_net(sk);
1766
1767 icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
1768 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1769 icsk->icsk_af_ops->net_header_len;
1770 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
1771 icsk->icsk_mtup.probe_size = 0;
1772 if (icsk->icsk_mtup.enabled)
1773 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
1774}
1775EXPORT_SYMBOL(tcp_mtup_init);
1776
/* This function synchronizes snd mss to the current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG.  It does
 * NOT count TCP options, but includes only the bare TCP header.
 *
 * tp->rx_opt.mss_clamp is the mss negotiated at connection setup.  It is
 * the minimum of user_mss and the mss received with the SYN.  It also
 * does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
 *
 * tp->mss_cache is the current effective sending mss, including all tcp
 * options except for SACKs.  It is evaluated taking into account the
 * current pmtu, but never exceeds tp->rx_opt.mss_clamp.
 *
 * NOTE1: rfc1122 clearly states that the advertised MSS DOES NOT include
 * either tcp or ip options.
 *
 * NOTE2: inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache are READ ONLY
 * outside this function.
 */
1799unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1800{
1801 struct tcp_sock *tp = tcp_sk(sk);
1802 struct inet_connection_sock *icsk = inet_csk(sk);
1803 int mss_now;
1804
1805 if (icsk->icsk_mtup.search_high > pmtu)
1806 icsk->icsk_mtup.search_high = pmtu;
1807
1808 mss_now = tcp_mtu_to_mss(sk, pmtu);
1809 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1810
1811
1812 icsk->icsk_pmtu_cookie = pmtu;
1813 if (icsk->icsk_mtup.enabled)
1814 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1815 tp->mss_cache = mss_now;
1816
1817 return mss_now;
1818}
1819EXPORT_SYMBOL(tcp_sync_mss);
1820
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
1824unsigned int tcp_current_mss(struct sock *sk)
1825{
1826 const struct tcp_sock *tp = tcp_sk(sk);
1827 const struct dst_entry *dst = __sk_dst_get(sk);
1828 u32 mss_now;
1829 unsigned int header_len;
1830 struct tcp_out_options opts;
1831 struct tcp_md5sig_key *md5;
1832
1833 mss_now = tp->mss_cache;
1834
1835 if (dst) {
1836 u32 mtu = dst_mtu(dst);
1837 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1838 mss_now = tcp_sync_mss(sk, mtu);
1839 }
1840
1841 header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1842 sizeof(struct tcphdr);
1843
1844
1845
1846
1847 if (header_len != tp->tcp_header_len) {
1848 int delta = (int) header_len - tp->tcp_header_len;
1849 mss_now -= delta;
1850 }
1851
1852 return mss_now;
1853}
1854
/* RFC2861, slow part.  Adjust cwnd after it was not full during one rto.
 * As additional protections, we do not touch cwnd in retransmission phases,
 * or if the application hit its sndbuf limit recently.
 */
1859static void tcp_cwnd_application_limited(struct sock *sk)
1860{
1861 struct tcp_sock *tp = tcp_sk(sk);
1862
1863 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1864 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1865
1866 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1867 u32 win_used = max(tp->snd_cwnd_used, init_win);
1868 if (win_used < tp->snd_cwnd) {
1869 tp->snd_ssthresh = tcp_current_ssthresh(sk);
1870 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
1871 }
1872 tp->snd_cwnd_used = 0;
1873 }
1874 tp->snd_cwnd_stamp = tcp_jiffies32;
1875}
1876
1877static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1878{
1879 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1880 struct tcp_sock *tp = tcp_sk(sk);
1881
1882
1883
1884
1885 if (!before(tp->snd_una, tp->max_packets_seq) ||
1886 tp->packets_out > tp->max_packets_out ||
1887 is_cwnd_limited) {
1888 tp->max_packets_out = tp->packets_out;
1889 tp->max_packets_seq = tp->snd_nxt;
1890 tp->is_cwnd_limited = is_cwnd_limited;
1891 }
1892
1893 if (tcp_is_cwnd_limited(sk)) {
1894
1895 tp->snd_cwnd_used = 0;
1896 tp->snd_cwnd_stamp = tcp_jiffies32;
1897 } else {
1898
1899 if (tp->packets_out > tp->snd_cwnd_used)
1900 tp->snd_cwnd_used = tp->packets_out;
1901
1902 if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1903 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
1904 !ca_ops->cong_control)
1905 tcp_cwnd_application_limited(sk);
1906
		/* The following conditions together indicate the starvation
		 * is caused by insufficient sender buffer:
		 * 1) just sent some data (see tcp_write_xmit)
		 * 2) not cwnd limited (this else condition)
		 * 3) no more data to send (tcp_write_queue_empty())
		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
		 */
1914 if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1915 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1916 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1917 tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1918 }
1919}
1920
1921
1922static bool tcp_minshall_check(const struct tcp_sock *tp)
1923{
1924 return after(tp->snd_sml, tp->snd_una) &&
1925 !after(tp->snd_sml, tp->snd_nxt);
1926}
1927
/* Update snd_sml if this skb is under mss.
 * Note that a TSO packet might end with a sub-mss segment.
 * The test is really:
 *   if ((skb->len % mss) != 0)
 *           tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 * but we can avoid doing the divide again given we already have
 * skb->len / mss cached in tcp_skb_pcount(skb).
 */
1936static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1937 const struct sk_buff *skb)
1938{
1939 if (skb->len < tcp_skb_pcount(skb) * mss_now)
1940 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1941}
1942
/* Return false if the packet can be sent now without violating Nagle's
 * rules:
 * 1. It is full sized (provided by the caller in %partial).
 * 2. Or it contains a FIN (already checked by the caller).
 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
1950static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1951 int nonagle)
1952{
1953 return partial &&
1954 ((nonagle & TCP_NAGLE_CORK) ||
1955 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1956}
1957
1958
1959
1960
1961static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1962 int min_tso_segs)
1963{
1964 u32 bytes, segs;
1965
1966 bytes = min_t(unsigned long,
1967 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
1968 sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1969
	/* Goal is to send at least one packet per ms,
	 * not one big TSO packet every 100 ms.
	 * This preserves ACK clocking and is consistent
	 * with tcp_tso_should_defer() heuristic.
	 */
1975 segs = max_t(u32, bytes / mss_now, min_tso_segs);
1976
1977 return segs;
1978}
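/* Example (assuming the default pacing shift of 10): at 1 Gbit/s, i.e.
 * 125 MB/s, bytes is about 122 kB, roughly one millisecond of data, or
 * about 84 segments at a 1448-byte MSS, before the min_tso_segs floor and
 * the caller's gso_max_segs clamp are applied.
 */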
1979
1980
1981
1982
1983static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1984{
1985 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1986 u32 min_tso, tso_segs;
1987
1988 min_tso = ca_ops->min_tso_segs ?
1989 ca_ops->min_tso_segs(sk) :
1990 sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1991
1992 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1993 return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1994}
1995
1996
1997static unsigned int tcp_mss_split_point(const struct sock *sk,
1998 const struct sk_buff *skb,
1999 unsigned int mss_now,
2000 unsigned int max_segs,
2001 int nonagle)
2002{
2003 const struct tcp_sock *tp = tcp_sk(sk);
2004 u32 partial, needed, window, max_len;
2005
2006 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2007 max_len = mss_now * max_segs;
2008
2009 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
2010 return max_len;
2011
2012 needed = min(skb->len, window);
2013
2014 if (max_len <= needed)
2015 return max_len;
2016
2017 partial = needed % mss_now;
2018
2019
2020
2021
2022 if (tcp_nagle_check(partial != 0, tp, nonagle))
2023 return needed - partial;
2024
2025 return needed;
2026}
2027
/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
2031static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
2032 const struct sk_buff *skb)
2033{
2034 u32 in_flight, cwnd, halfcwnd;
2035
2036
2037 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2038 tcp_skb_pcount(skb) == 1)
2039 return 1;
2040
2041 in_flight = tcp_packets_in_flight(tp);
2042 cwnd = tp->snd_cwnd;
2043 if (in_flight >= cwnd)
2044 return 0;
2045
2046
2047
2048
2049 halfcwnd = max(cwnd >> 1, 1U);
2050 return min(halfcwnd, cwnd - in_flight);
2051}
2052
2053
2054
2055
2056
2057static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2058{
2059 int tso_segs = tcp_skb_pcount(skb);
2060
2061 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
2062 tcp_set_skb_tso_segs(skb, mss_now);
2063 tso_segs = tcp_skb_pcount(skb);
2064 }
2065 return tso_segs;
2066}
2067
/* Return true if the Nagle test allows this packet to be
 * sent now.
 */
2072static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2073 unsigned int cur_mss, int nonagle)
2074{
2075
2076
2077
2078
2079
2080
2081 if (nonagle & TCP_NAGLE_PUSH)
2082 return true;
2083
2084
2085 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2086 return true;
2087
2088 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2089 return true;
2090
2091 return false;
2092}
2093
2094
2095static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2096 const struct sk_buff *skb,
2097 unsigned int cur_mss)
2098{
2099 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2100
2101 if (skb->len > cur_mss)
2102 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2103
2104 return !after(end_seq, tcp_wnd_end(tp));
2105}
2106
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
2114static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2115 unsigned int mss_now, gfp_t gfp)
2116{
2117 int nlen = skb->len - len;
2118 struct sk_buff *buff;
2119 u8 flags;
2120
2121
2122 if (skb->len != skb->data_len)
2123 return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
2124 skb, len, mss_now, gfp);
2125
2126 buff = sk_stream_alloc_skb(sk, 0, gfp, true);
2127 if (unlikely(!buff))
2128 return -ENOMEM;
2129 skb_copy_decrypted(buff, skb);
2130 mptcp_skb_ext_copy(buff, skb);
2131
2132 sk_wmem_queued_add(sk, buff->truesize);
2133 sk_mem_charge(sk, buff->truesize);
2134 buff->truesize += nlen;
2135 skb->truesize -= nlen;
2136
2137
2138 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2139 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2140 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2141
2142
2143 flags = TCP_SKB_CB(skb)->tcp_flags;
2144 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
2145 TCP_SKB_CB(buff)->tcp_flags = flags;
2146
2147
2148 TCP_SKB_CB(buff)->sacked = 0;
2149
2150 tcp_skb_fragment_eor(skb, buff);
2151
2152 buff->ip_summed = CHECKSUM_PARTIAL;
2153 skb_split(skb, buff, len);
2154 tcp_fragment_tstamp(skb, buff);
2155
2156
2157 tcp_set_skb_tso_segs(skb, mss_now);
2158 tcp_set_skb_tso_segs(buff, mss_now);
2159
2160
2161 __skb_header_release(buff);
2162 tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2163
2164 return 0;
2165}
2166
/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If there is improved latency
 * to be had, we defer to TSO to allow more data to be sent.
 *
 * This algorithm is from John Heffner.
 */
2172static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2173 bool *is_cwnd_limited,
2174 bool *is_rwnd_limited,
2175 u32 max_segs)
2176{
2177 const struct inet_connection_sock *icsk = inet_csk(sk);
2178 u32 send_win, cong_win, limit, in_flight;
2179 struct tcp_sock *tp = tcp_sk(sk);
2180 struct sk_buff *head;
2181 int win_divisor;
2182 s64 delta;
2183
2184 if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2185 goto send_now;
2186
	/* Avoid bursty behavior by allowing defer
	 * only if the last write was recent (1 ms).
	 * Note that tp->tcp_wstamp_ns can be in the future if we have
	 * packets waiting in a qdisc or device for EDT delivery.
	 */
2192 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2193 if (delta > 0)
2194 goto send_now;
2195
2196 in_flight = tcp_packets_in_flight(tp);
2197
2198 BUG_ON(tcp_skb_pcount(skb) <= 1);
2199 BUG_ON(tp->snd_cwnd <= in_flight);
2200
2201 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2202
2203
2204 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
2205
2206 limit = min(send_win, cong_win);
2207
2208
2209 if (limit >= max_segs * tp->mss_cache)
2210 goto send_now;
2211
2212
2213 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
2214 goto send_now;
2215
2216 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2217 if (win_divisor) {
2218 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
2219
2220
2221
2222
2223 chunk /= win_divisor;
2224 if (limit >= chunk)
2225 goto send_now;
2226 } else {
2227
2228
2229
2230
2231
2232 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2233 goto send_now;
2234 }
2235
2236
2237 head = tcp_rtx_queue_head(sk);
2238 if (!head)
2239 goto send_now;
2240 delta = tp->tcp_clock_cache - head->tstamp;
2241
2242 if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
2243 goto send_now;
2244
	/* Ok, it looks like it is advisable to defer.
	 * Three cases are tracked:
	 * 1) We are cwnd-limited
	 * 2) We are rwnd-limited
	 * 3) We are application limited.
	 */
2251 if (cong_win < send_win) {
2252 if (cong_win <= skb->len) {
2253 *is_cwnd_limited = true;
2254 return true;
2255 }
2256 } else {
2257 if (send_win <= skb->len) {
2258 *is_rwnd_limited = true;
2259 return true;
2260 }
2261 }
2262
2263
2264 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2265 TCP_SKB_CB(skb)->eor)
2266 goto send_now;
2267
2268 return true;
2269
2270send_now:
2271 return false;
2272}
2273
2274static inline void tcp_mtu_check_reprobe(struct sock *sk)
2275{
2276 struct inet_connection_sock *icsk = inet_csk(sk);
2277 struct tcp_sock *tp = tcp_sk(sk);
2278 struct net *net = sock_net(sk);
2279 u32 interval;
2280 s32 delta;
2281
2282 interval = net->ipv4.sysctl_tcp_probe_interval;
2283 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
2284 if (unlikely(delta >= interval * HZ)) {
2285 int mss = tcp_current_mss(sk);
2286
2287
2288 icsk->icsk_mtup.probe_size = 0;
2289 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
2290 sizeof(struct tcphdr) +
2291 icsk->icsk_af_ops->net_header_len;
2292 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
2293
2294
2295 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2296 }
2297}
2298
2299static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2300{
2301 struct sk_buff *skb, *next;
2302
2303 skb = tcp_send_head(sk);
2304 tcp_for_write_queue_from_safe(skb, next, sk) {
2305 if (len <= skb->len)
2306 break;
2307
2308 if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2309 return false;
2310
2311 len -= skb->len;
2312 }
2313
2314 return true;
2315}
2316
/* Create a new MTU probe if we are ready.
 * MTU probing regularly attempts to increase the path MTU by
 * deliberately sending larger packets.  This discovers routing
 * changes resulting in larger path MTUs.
 *
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
2326static int tcp_mtu_probe(struct sock *sk)
2327{
2328 struct inet_connection_sock *icsk = inet_csk(sk);
2329 struct tcp_sock *tp = tcp_sk(sk);
2330 struct sk_buff *skb, *nskb, *next;
2331 struct net *net = sock_net(sk);
2332 int probe_size;
2333 int size_needed;
2334 int copy, len;
2335 int mss_now;
2336 int interval;
2337
	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off)
	 */
2343 if (likely(!icsk->icsk_mtup.enabled ||
2344 icsk->icsk_mtup.probe_size ||
2345 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
2346 tp->snd_cwnd < 11 ||
2347 tp->rx_opt.num_sacks || tp->rx_opt.dsack))
2348 return -1;
2349
2350
2351
2352
2353
2354 mss_now = tcp_current_mss(sk);
2355 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
2356 icsk->icsk_mtup.search_low) >> 1);
2357 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
2358 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
2359
2360
2361
2362
2363 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
2364 interval < net->ipv4.sysctl_tcp_probe_threshold) {
2365
2366
2367
2368 tcp_mtu_check_reprobe(sk);
2369 return -1;
2370 }
2371
2372
2373 if (tp->write_seq - tp->snd_nxt < size_needed)
2374 return -1;
2375
2376 if (tp->snd_wnd < size_needed)
2377 return -1;
2378 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
2379 return 0;
2380
2381
2382 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2383 if (!tcp_packets_in_flight(tp))
2384 return -1;
2385 else
2386 return 0;
2387 }
2388
2389 if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2390 return -1;
2391
2392
2393 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
2394 if (!nskb)
2395 return -1;
2396 sk_wmem_queued_add(sk, nskb->truesize);
2397 sk_mem_charge(sk, nskb->truesize);
2398
2399 skb = tcp_send_head(sk);
2400 skb_copy_decrypted(nskb, skb);
2401 mptcp_skb_ext_copy(nskb, skb);
2402
2403 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2404 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2405 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2406 TCP_SKB_CB(nskb)->sacked = 0;
2407 nskb->csum = 0;
2408 nskb->ip_summed = CHECKSUM_PARTIAL;
2409
2410 tcp_insert_write_queue_before(nskb, skb, sk);
2411 tcp_highest_sack_replace(sk, skb, nskb);
2412
2413 len = 0;
2414 tcp_for_write_queue_from_safe(skb, next, sk) {
2415 copy = min_t(int, skb->len, probe_size - len);
2416 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
2417
2418 if (skb->len <= copy) {
2419
2420
2421 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2422
2423
2424
2425 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2426 tcp_skb_collapse_tstamp(nskb, skb);
2427 tcp_unlink_write_queue(skb, sk);
2428 sk_wmem_free_skb(sk, skb);
2429 } else {
2430 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2431 ~(TCPHDR_FIN|TCPHDR_PSH);
2432 if (!skb_shinfo(skb)->nr_frags) {
2433 skb_pull(skb, copy);
2434 } else {
2435 __pskb_trim_head(skb, copy);
2436 tcp_set_skb_tso_segs(skb, mss_now);
2437 }
2438 TCP_SKB_CB(skb)->seq += copy;
2439 }
2440
2441 len += copy;
2442
2443 if (len >= probe_size)
2444 break;
2445 }
2446 tcp_init_tso_segs(nskb, nskb->len);
2447
2448
2449
2450
2451 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
 /* Decrement cwnd here because we are sending
  * effectively two packets. */
2454 tp->snd_cwnd--;
2455 tcp_event_new_data_sent(sk, nskb);
2456
2457 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2458 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2459 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2460
2461 return 1;
2462 }
2463
2464 return -1;
2465}
2466
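/* Return true if transmission must be deferred by the internal pacing
 * timer; if so, arm the pacing hrtimer for the next release time.
 */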
2467static bool tcp_pacing_check(struct sock *sk)
2468{
2469 struct tcp_sock *tp = tcp_sk(sk);
2470
2471 if (!tcp_needs_internal_pacing(sk))
2472 return false;
2473
2474 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2475 return false;
2476
2477 if (!hrtimer_is_queued(&tp->pacing_timer)) {
2478 hrtimer_start(&tp->pacing_timer,
2479 ns_to_ktime(tp->tcp_wstamp_ns),
2480 HRTIMER_MODE_ABS_PINNED_SOFT);
2481 sock_hold(sk);
2482 }
2483 return true;
2484}
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
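/* TCP Small Queues:
 * Limit the amount of data queued below this socket in qdisc/device
 * layers to roughly two packets or ~1 ms worth at the current pacing
 * rate (the limit is scaled up by @factor for retransmits).  This keeps
 * RTT estimation and ACK clocking accurate while still allowing enough
 * queueing for drivers that need it to reach line rate.
 */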
2497static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2498 unsigned int factor)
2499{
2500 unsigned long limit;
2501
2502 limit = max_t(unsigned long,
2503 2 * skb->truesize,
2504 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
2505 if (sk->sk_pacing_status == SK_PACING_NONE)
2506 limit = min_t(unsigned long, limit,
2507 sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2508 limit <<= factor;
2509
2510 if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2511 tcp_sk(sk)->tcp_tx_delay) {
2512 u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2513
2514
2515
2516
2517
2518
2519 extra_bytes >>= (20 - 1);
2520 limit += extra_bytes;
2521 }
2522 if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2523
2524
2525
2526
2527
2528 if (tcp_rtx_queue_empty(sk))
2529 return false;
2530
2531 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2532
2533
2534
2535
2536 smp_mb__after_atomic();
2537 if (refcount_read(&sk->sk_wmem_alloc) > limit)
2538 return true;
2539 }
2540 return false;
2541}
2542
2543static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2544{
2545 const u32 now = tcp_jiffies32;
2546 enum tcp_chrono old = tp->chrono_type;
2547
2548 if (old > TCP_CHRONO_UNSPEC)
2549 tp->chrono_stat[old - 1] += now - tp->chrono_start;
2550 tp->chrono_start = now;
2551 tp->chrono_type = new;
2552}
2553
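/* Start tracking a busy-period chronograph of the given type.  If several
 * conditions hold at once, the highest-priority enum value wins.
 */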
2554void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2555{
2556 struct tcp_sock *tp = tcp_sk(sk);
2557
2558
2559
2560
2561
2562
2563 if (type > tp->chrono_type)
2564 tcp_chrono_set(tp, type);
2565}
2566
2567void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2568{
2569 struct tcp_sock *tp = tcp_sk(sk);
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579 if (tcp_rtx_and_write_queues_empty(sk))
2580 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2581 else if (type == tp->chrono_type)
2582 tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2583}
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
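/* This routine writes packets to the network.  It advances the
 * send_head as incoming ACKs open up the remote window.
 *
 * Send at most one packet when push_one > 0.  Temporarily ignore the
 * cwnd limit to force at most one packet out when push_one == 2.
 *
 * Returns true if no segments are in flight and we have queued segments,
 * but cannot send anything now because of SWS or another problem.
 */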
2599static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2600 int push_one, gfp_t gfp)
2601{
2602 struct tcp_sock *tp = tcp_sk(sk);
2603 struct sk_buff *skb;
2604 unsigned int tso_segs, sent_pkts;
2605 int cwnd_quota;
2606 int result;
2607 bool is_cwnd_limited = false, is_rwnd_limited = false;
2608 u32 max_segs;
2609
2610 sent_pkts = 0;
2611
2612 tcp_mstamp_refresh(tp);
2613 if (!push_one) {
 /* Do MTU probing. */
2615 result = tcp_mtu_probe(sk);
2616 if (!result) {
2617 return false;
2618 } else if (result > 0) {
2619 sent_pkts = 1;
2620 }
2621 }
2622
2623 max_segs = tcp_tso_segs(sk, mss_now);
2624 while ((skb = tcp_send_head(sk))) {
2625 unsigned int limit;
2626
2627 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 /* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2629 skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2630 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2631 tcp_init_tso_segs(skb, mss_now);
2632 goto repair;
2633 }
2634
2635 if (tcp_pacing_check(sk))
2636 break;
2637
2638 tso_segs = tcp_init_tso_segs(skb, mss_now);
2639 BUG_ON(!tso_segs);
2640
2641 cwnd_quota = tcp_cwnd_test(tp, skb);
2642 if (!cwnd_quota) {
2643 if (push_one == 2)
 /* Force out a loss probe pkt. */
2645 cwnd_quota = 1;
2646 else
2647 break;
2648 }
2649
2650 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2651 is_rwnd_limited = true;
2652 break;
2653 }
2654
2655 if (tso_segs == 1) {
2656 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2657 (tcp_skb_is_last(sk, skb) ?
2658 nonagle : TCP_NAGLE_PUSH))))
2659 break;
2660 } else {
2661 if (!push_one &&
2662 tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2663 &is_rwnd_limited, max_segs))
2664 break;
2665 }
2666
2667 limit = mss_now;
2668 if (tso_segs > 1 && !tcp_urg_mode(tp))
2669 limit = tcp_mss_split_point(sk, skb, mss_now,
2670 min_t(unsigned int,
2671 cwnd_quota,
2672 max_segs),
2673 nonagle);
2674
2675 if (skb->len > limit &&
2676 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2677 break;
2678
2679 if (tcp_small_queue_check(sk, skb, 0))
2680 break;
2681
 /* Argh, we hit an empty skb(), presumably a thread
  * is sleeping in sendmsg()/sk_stream_wait_memory().
  * We do not want to send a pure-ack packet and have
  * a strange looking rtx queue with empty packet(s).
  */
2687 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
2688 break;
2689
2690 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2691 break;
2692
2693repair:
 /* Advance the send_head.  This one is sent out.
  * This call will increment packets_out.
  */
2697 tcp_event_new_data_sent(sk, skb);
2698
2699 tcp_minshall_update(tp, mss_now, skb);
2700 sent_pkts += tcp_skb_pcount(skb);
2701
2702 if (push_one)
2703 break;
2704 }
2705
2706 if (is_rwnd_limited)
2707 tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
2708 else
2709 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
2710
2711 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2712 if (likely(sent_pkts || is_cwnd_limited))
2713 tcp_cwnd_validate(sk, is_cwnd_limited);
2714
2715 if (likely(sent_pkts)) {
2716 if (tcp_in_cwnd_reduction(sk))
2717 tp->prr_out += sent_pkts;
2718
 /* Send one loss probe per tail loss episode. */
2720 if (push_one != 2)
2721 tcp_schedule_loss_probe(sk, false);
2722 return false;
2723 }
2724 return !tp->packets_out && !tcp_write_queue_empty(sk);
2725}
2726
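/* Arm the tail loss probe (TLP) timer so that a probe is sent roughly
 * 2*SRTT from now if no further ACK arrives.  Returns true if the timer
 * was scheduled.
 */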
2727bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
2728{
2729 struct inet_connection_sock *icsk = inet_csk(sk);
2730 struct tcp_sock *tp = tcp_sk(sk);
2731 u32 timeout, rto_delta_us;
2732 int early_retrans;
2733
 /* Don't do any loss probe on a Fast Open connection before the
  * 3WHS finishes.
  */
2737 if (rcu_access_pointer(tp->fastopen_rsk))
2738 return false;
2739
2740 early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
 /* Schedule a loss probe in 2*RTT for SACK capable connections
  * in Open state, that are either limited by cwnd or application.
  */
2744 if ((early_retrans != 3 && early_retrans != 4) ||
2745 !tp->packets_out || !tcp_is_sack(tp) ||
2746 (icsk->icsk_ca_state != TCP_CA_Open &&
2747 icsk->icsk_ca_state != TCP_CA_CWR))
2748 return false;
2749
 /* Probe timeout is 2*rtt. Add minimum RTO to account
  * for delayed ack when there's one outstanding packet. If no RTT
  * sample is available then probe after TCP_TIMEOUT_INIT.
  */
2754 if (tp->srtt_us) {
2755 timeout = usecs_to_jiffies(tp->srtt_us >> 2);
2756 if (tp->packets_out == 1)
2757 timeout += TCP_RTO_MIN;
2758 else
2759 timeout += TCP_TIMEOUT_MIN;
2760 } else {
2761 timeout = TCP_TIMEOUT_INIT;
2762 }
2763
 /* If the RTO formula yields an earlier time, then use that time. */
2765 rto_delta_us = advancing_rto ?
2766 jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2767 tcp_rto_delta_us(sk);
2768 if (rto_delta_us > 0)
2769 timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
2770
2771 tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
2772 return true;
2773}
2774
2775
2776
2777
2778
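/* Thanks to skb fast clones, we can detect if a prior transmit of
 * a packet is still in a qdisc or driver queue.
 * In this case, there is very little point doing a retransmit !
 */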
2779static bool skb_still_in_host_queue(struct sock *sk,
2780 const struct sk_buff *skb)
2781{
2782 if (unlikely(skb_fclone_busy(sk, skb))) {
2783 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2784 smp_mb__after_atomic();
2785 if (skb_fclone_busy(sk, skb)) {
2786 NET_INC_STATS(sock_net(sk),
2787 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2788 return true;
2789 }
2790 }
2791 return false;
2792}
2793
2794
2795
2796
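/* When the probe timeout (PTO) fires, try to send a new segment if one is
 * available; otherwise retransmit the last segment.
 */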
2797void tcp_send_loss_probe(struct sock *sk)
2798{
2799 struct tcp_sock *tp = tcp_sk(sk);
2800 struct sk_buff *skb;
2801 int pcount;
2802 int mss = tcp_current_mss(sk);
2803
 /* At most one outstanding TLP retransmission. */
2805 if (tp->tlp_high_seq)
2806 goto rearm_timer;
2807
2808 tp->tlp_retrans = 0;
2809 skb = tcp_send_head(sk);
2810 if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2811 pcount = tp->packets_out;
2812 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2813 if (tp->packets_out > pcount)
2814 goto probe_sent;
2815 goto rearm_timer;
2816 }
2817 skb = skb_rb_last(&sk->tcp_rtx_queue);
2818 if (unlikely(!skb)) {
2819 WARN_ONCE(tp->packets_out,
2820 "invalid inflight: %u state %u cwnd %u mss %d\n",
2821 tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2822 inet_csk(sk)->icsk_pending = 0;
2823 return;
2824 }
2825
2826 if (skb_still_in_host_queue(sk, skb))
2827 goto rearm_timer;
2828
2829 pcount = tcp_skb_pcount(skb);
2830 if (WARN_ON(!pcount))
2831 goto rearm_timer;
2832
2833 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2834 if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2835 (pcount - 1) * mss, mss,
2836 GFP_ATOMIC)))
2837 goto rearm_timer;
2838 skb = skb_rb_next(skb);
2839 }
2840
2841 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2842 goto rearm_timer;
2843
2844 if (__tcp_retransmit_skb(sk, skb, 1))
2845 goto rearm_timer;
2846
2847 tp->tlp_retrans = 1;
2848
2849probe_sent:
2850
2851 tp->tlp_high_seq = tp->snd_nxt;
2852
2853 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2854
2855 inet_csk(sk)->icsk_pending = 0;
2856rearm_timer:
2857 tcp_rearm_rto(sk);
2858}
2859
2860
2861
2862
2863
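/* Push out any pending frames which were held back due to TCP_CORK or
 * an attempt at coalescing tiny packets.  The socket must be locked by
 * the caller.
 */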
2864void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2865 int nonagle)
2866{
2867
2868
2869
2870
2871 if (unlikely(sk->sk_state == TCP_CLOSE))
2872 return;
2873
2874 if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2875 sk_gfp_mask(sk, GFP_ATOMIC)))
2876 tcp_check_probe_timer(sk);
2877}
2878
2879
2880
2881
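/* Send _single_ skb sitting at the send head.  This function requires
 * true push pending frames to setup probe timer etc.
 */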
2882void tcp_push_one(struct sock *sk, unsigned int mss_now)
2883{
2884 struct sk_buff *skb = tcp_send_head(sk);
2885
2886 BUG_ON(!skb || skb->len < mss_now);
2887
2888 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2889}
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
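/* Calculate the new receive window to advertise.  The result is based on
 * the free receive space, clamped to window_clamp and rcv_ssthresh,
 * aligned to the receive window scale, and kept away from silly
 * (sub-MSS) values when window scaling is not in use.
 */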
2943u32 __tcp_select_window(struct sock *sk)
2944{
2945 struct inet_connection_sock *icsk = inet_csk(sk);
2946 struct tcp_sock *tp = tcp_sk(sk);
2947
2948
2949
2950
2951
2952
2953 int mss = icsk->icsk_ack.rcv_mss;
2954 int free_space = tcp_space(sk);
2955 int allowed_space = tcp_full_space(sk);
2956 int full_space, window;
2957
2958 if (sk_is_mptcp(sk))
2959 mptcp_space(sk, &free_space, &allowed_space);
2960
2961 full_space = min_t(int, tp->window_clamp, allowed_space);
2962
2963 if (unlikely(mss > full_space)) {
2964 mss = full_space;
2965 if (mss <= 0)
2966 return 0;
2967 }
2968 if (free_space < (full_space >> 1)) {
2969 icsk->icsk_ack.quick = 0;
2970
2971 if (tcp_under_memory_pressure(sk))
2972 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2973 4U * tp->advmss);
2974
2975
2976
2977
2978 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
2979
2980
2981
2982
2983
2984
2985
2986
2987 if (free_space < (allowed_space >> 4) || free_space < mss)
2988 return 0;
2989 }
2990
2991 if (free_space > tp->rcv_ssthresh)
2992 free_space = tp->rcv_ssthresh;
2993
2994
2995
2996
2997 if (tp->rx_opt.rcv_wscale) {
2998 window = free_space;
2999
3000
3001
3002
3003
3004 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
3005 } else {
3006 window = tp->rcv_wnd;
3007
3008
3009
3010
3011
3012
3013
3014
3015 if (window <= free_space - mss || window > free_space)
3016 window = rounddown(free_space, mss);
3017 else if (mss == full_space &&
3018 free_space > window + (full_space >> 1))
3019 window = free_space;
3020 }
3021
3022 return window;
3023}
3024
3025void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3026 const struct sk_buff *next_skb)
3027{
3028 if (unlikely(tcp_has_tx_tstamp(next_skb))) {
3029 const struct skb_shared_info *next_shinfo =
3030 skb_shinfo(next_skb);
3031 struct skb_shared_info *shinfo = skb_shinfo(skb);
3032
3033 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3034 shinfo->tskey = next_shinfo->tskey;
3035 TCP_SKB_CB(skb)->txstamp_ack |=
3036 TCP_SKB_CB(next_skb)->txstamp_ack;
3037 }
3038}
3039
3040
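/* Collapse the next skb in the retransmit queue into @skb during
 * retransmission, merging payload, flags and accounting.
 */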
3041static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
3042{
3043 struct tcp_sock *tp = tcp_sk(sk);
3044 struct sk_buff *next_skb = skb_rb_next(skb);
3045 int next_skb_size;
3046
3047 next_skb_size = next_skb->len;
3048
3049 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
3050
3051 if (next_skb_size) {
3052 if (next_skb_size <= skb_availroom(skb))
3053 skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
3054 next_skb_size);
3055 else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3056 return false;
3057 }
3058 tcp_highest_sack_replace(sk, next_skb, skb);
3059
3060
3061 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
3062
3063
3064 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
3065
3066
3067
3068
3069 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3070 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3071
3072
3073 tcp_clear_retrans_hints_partial(tp);
3074 if (next_skb == tp->retransmit_skb_hint)
3075 tp->retransmit_skb_hint = skb;
3076
3077 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3078
3079 tcp_skb_collapse_tstamp(skb, next_skb);
3080
3081 tcp_rtx_queue_unlink_and_free(next_skb, sk);
3082 return true;
3083}
3084
3085
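/* Check if coalescing SKBs is legal. */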
3086static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
3087{
3088 if (tcp_skb_pcount(skb) > 1)
3089 return false;
3090 if (skb_cloned(skb))
3091 return false;
3092
3093 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3094 return false;
3095
3096 return true;
3097}
3098
3099
3100
3101
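/* Collapse packets in the retransmit queue to create fewer packets on
 * the wire.  This is only done on retransmission.
 */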
3102static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
3103 int space)
3104{
3105 struct tcp_sock *tp = tcp_sk(sk);
3106 struct sk_buff *skb = to, *tmp;
3107 bool first = true;
3108
3109 if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
3110 return;
3111 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3112 return;
3113
3114 skb_rbtree_walk_from_safe(skb, tmp) {
3115 if (!tcp_can_collapse(sk, skb))
3116 break;
3117
3118 if (!tcp_skb_can_collapse(to, skb))
3119 break;
3120
3121 space -= skb->len;
3122
3123 if (first) {
3124 first = false;
3125 continue;
3126 }
3127
3128 if (space < 0)
3129 break;
3130
3131 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
3132 break;
3133
3134 if (!tcp_collapse_retrans(sk, to))
3135 break;
3136 }
3137}
3138
3139
3140
3141
3142
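/* This retransmits one SKB.  Policy decisions and retransmit queue
 * state updates are done by the caller.  Returns non-zero if an
 * error occurred which prevented the send.
 */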
3143int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3144{
3145 struct inet_connection_sock *icsk = inet_csk(sk);
3146 struct tcp_sock *tp = tcp_sk(sk);
3147 unsigned int cur_mss;
3148 int diff, len, err;
3149
3150
 /* Inconclusive MTU probe */
3152 if (icsk->icsk_mtup.probe_size)
3153 icsk->icsk_mtup.probe_size = 0;
3154
3155 if (skb_still_in_host_queue(sk, skb))
3156 return -EBUSY;
3157
3158 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3159 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3160 WARN_ON_ONCE(1);
3161 return -EINVAL;
3162 }
3163 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3164 return -ENOMEM;
3165 }
3166
3167 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3168 return -EHOSTUNREACH;
3169
3170 cur_mss = tcp_current_mss(sk);
3171
 /* If the receiver has shrunk its window and this skb is now beyond
  * the new window, do not retransmit it, unless the window has shrunk
  * to zero, in which case our retransmit serves as a zero window
  * probe.
  */
3177 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
3178 TCP_SKB_CB(skb)->seq != tp->snd_una)
3179 return -EAGAIN;
3180
3181 len = cur_mss * segs;
3182 if (skb->len > len) {
3183 if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
3184 cur_mss, GFP_ATOMIC))
3185 return -ENOMEM;
3186 } else {
3187 if (skb_unclone(skb, GFP_ATOMIC))
3188 return -ENOMEM;
3189
3190 diff = tcp_skb_pcount(skb);
3191 tcp_set_skb_tso_segs(skb, cur_mss);
3192 diff -= tcp_skb_pcount(skb);
3193 if (diff)
3194 tcp_adjust_pcount(sk, skb, diff);
3195 if (skb->len < cur_mss)
3196 tcp_retrans_try_collapse(sk, skb, cur_mss);
3197 }
3198
 /* RFC 3168, section 6.1.1.1. ECN fallback */
3200 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
3201 tcp_ecn_clear_syn(sk, skb);
3202
3203
3204 segs = tcp_skb_pcount(skb);
3205 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3206 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3207 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3208 tp->total_retrans += segs;
3209 tp->bytes_retrans += skb->len;
3210
 /* Make sure skb->data is aligned on arches that require it
  * and check if ack-trimming & collapsing extended the headroom
  * beyond what csum_start can cover.
  */
3215 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3216 skb_headroom(skb) >= 0xFFFF)) {
3217 struct sk_buff *nskb;
3218
3219 tcp_skb_tsorted_save(skb) {
3220 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3221 if (nskb) {
3222 nskb->dev = NULL;
3223 err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
3224 } else {
3225 err = -ENOBUFS;
3226 }
3227 } tcp_skb_tsorted_restore(skb);
3228
3229 if (!err) {
3230 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
3231 tcp_rate_skb_sent(sk, skb);
3232 }
3233 } else {
3234 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3235 }
3236
3237
3238
3239
3240 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
3241
3242 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3243 tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3244 TCP_SKB_CB(skb)->seq, segs, err);
3245
3246 if (likely(!err)) {
3247 trace_tcp_retransmit_skb(sk, skb);
3248 } else if (err != -EBUSY) {
3249 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3250 }
3251 return err;
3252}
3253
3254int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3255{
3256 struct tcp_sock *tp = tcp_sk(sk);
3257 int err = __tcp_retransmit_skb(sk, skb, segs);
3258
3259 if (err == 0) {
3260#if FASTRETRANS_DEBUG > 0
3261 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3262 net_dbg_ratelimited("retrans_out leaked\n");
3263 }
3264#endif
3265 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
3266 tp->retrans_out += tcp_skb_pcount(skb);
3267 }
3268
 /* Save stamp of the first (attempted) retransmit. */
3270 if (!tp->retrans_stamp)
3271 tp->retrans_stamp = tcp_skb_timestamp(skb);
3272
3273 if (tp->undo_retrans < 0)
3274 tp->undo_retrans = 0;
3275 tp->undo_retrans += tcp_skb_pcount(skb);
3276 return err;
3277}
3278
3279
3280
3281
3282
3283
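/* This gets called after a retransmit timeout, and the initially
 * retransmitted data is acknowledged.  It tries to continue
 * resending the rest of the retransmit queue, until either
 * we've sent it all or the congestion window limit is reached.
 */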
3284void tcp_xmit_retransmit_queue(struct sock *sk)
3285{
3286 const struct inet_connection_sock *icsk = inet_csk(sk);
3287 struct sk_buff *skb, *rtx_head, *hole = NULL;
3288 struct tcp_sock *tp = tcp_sk(sk);
3289 bool rearm_timer = false;
3290 u32 max_segs;
3291 int mib_idx;
3292
3293 if (!tp->packets_out)
3294 return;
3295
3296 rtx_head = tcp_rtx_queue_head(sk);
3297 skb = tp->retransmit_skb_hint ?: rtx_head;
3298 max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
3299 skb_rbtree_walk_from(skb) {
3300 __u8 sacked;
3301 int segs;
3302
3303 if (tcp_pacing_check(sk))
3304 break;
3305
3306
3307 if (!hole)
3308 tp->retransmit_skb_hint = skb;
3309
3310 segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
3311 if (segs <= 0)
3312 break;
3313 sacked = TCP_SKB_CB(skb)->sacked;
3314
3315
3316
3317 segs = min_t(int, segs, max_segs);
3318
3319 if (tp->retrans_out >= tp->lost_out) {
3320 break;
3321 } else if (!(sacked & TCPCB_LOST)) {
3322 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
3323 hole = skb;
3324 continue;
3325
3326 } else {
3327 if (icsk->icsk_ca_state != TCP_CA_Loss)
3328 mib_idx = LINUX_MIB_TCPFASTRETRANS;
3329 else
3330 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
3331 }
3332
3333 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
3334 continue;
3335
3336 if (tcp_small_queue_check(sk, skb, 1))
3337 break;
3338
3339 if (tcp_retransmit_skb(sk, skb, segs))
3340 break;
3341
3342 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
3343
3344 if (tcp_in_cwnd_reduction(sk))
3345 tp->prr_out += tcp_skb_pcount(skb);
3346
3347 if (skb == rtx_head &&
3348 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3349 rearm_timer = true;
3350
3351 }
3352 if (rearm_timer)
3353 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3354 inet_csk(sk)->icsk_rto,
3355 TCP_RTO_MAX);
3356}
3357
3358
3359
3360
3361
3362
3363
3364
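/* We allow to exceed memory limits for FIN packets to expedite
 * connection tear down and (memory) recovery.
 * Otherwise tcp_send_fin() could be tempted to either delay FIN
 * or even be forced to close the flow without any FIN.
 * In general, we want to allow one skb per socket to avoid hangs
 * with edge-triggered epoll().
 */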
3365void sk_forced_mem_schedule(struct sock *sk, int size)
3366{
3367 int amt;
3368
3369 if (size <= sk->sk_forward_alloc)
3370 return;
3371 amt = sk_mem_pages(size);
3372 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3373 sk_memory_allocated_add(sk, amt);
3374
3375 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3376 mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3377}
3378
3379
3380
3381
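/* Send a FIN.  The caller locks the socket for us.
 * We should try to send a FIN packet really hard, but eventually give up.
 */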
3382void tcp_send_fin(struct sock *sk)
3383{
3384 struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
3385 struct tcp_sock *tp = tcp_sk(sk);
3386
 /* Optimization: tack the FIN onto an existing skb if we have one in the
  * write queue that was not yet sent, or, under memory pressure, onto the
  * tail of the retransmit queue.  In the latter case the FIN will go out
  * after a timeout, as the stack thinks it has already been transmitted.
  */
3392 tskb = tail;
3393 if (!tskb && tcp_under_memory_pressure(sk))
3394 tskb = skb_rb_last(&sk->tcp_rtx_queue);
3395
3396 if (tskb) {
3397 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3398 TCP_SKB_CB(tskb)->end_seq++;
3399 tp->write_seq++;
3400 if (!tail) {
3401
3402
3403
3404
3405
3406
3407 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3408 return;
3409 }
3410 } else {
3411 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3412 if (unlikely(!skb))
3413 return;
3414
3415 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3416 skb_reserve(skb, MAX_TCP_HEADER);
3417 sk_forced_mem_schedule(sk, skb->truesize);
3418
3419 tcp_init_nondata_skb(skb, tp->write_seq,
3420 TCPHDR_ACK | TCPHDR_FIN);
3421 tcp_queue_skb(sk, skb);
3422 }
3423 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3424}
3425
3426
3427
3428
3429
3430
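/* We get here when a process closes a file descriptor (either due to
 * an explicit close() or as a byproduct of exit()'ing) and there
 * was unread data in the receive queue.  This behavior is recommended
 * by RFC 2525, section 2.17.
 */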
3431void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3432{
3433 struct sk_buff *skb;
3434
3435 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3436
3437
3438 skb = alloc_skb(MAX_TCP_HEADER, priority);
3439 if (!skb) {
3440 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3441 return;
3442 }
3443
3444
3445 skb_reserve(skb, MAX_TCP_HEADER);
3446 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3447 TCPHDR_ACK | TCPHDR_RST);
3448 tcp_mstamp_refresh(tcp_sk(sk));
3449
3450 if (tcp_transmit_skb(sk, skb, 0, priority))
3451 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3452
3453
3454
3455
3456 trace_tcp_send_reset(sk, NULL);
3457}
3458
3459
3460
3461
3462
3463
3464
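/* Send a crossed SYN-ACK during socket establishment.
 * WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called.  If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */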
3465int tcp_send_synack(struct sock *sk)
3466{
3467 struct sk_buff *skb;
3468
3469 skb = tcp_rtx_queue_head(sk);
3470 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3471 pr_err("%s: wrong queue state\n", __func__);
3472 return -EFAULT;
3473 }
3474 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3475 if (skb_cloned(skb)) {
3476 struct sk_buff *nskb;
3477
3478 tcp_skb_tsorted_save(skb) {
3479 nskb = skb_copy(skb, GFP_ATOMIC);
3480 } tcp_skb_tsorted_restore(skb);
3481 if (!nskb)
3482 return -ENOMEM;
3483 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3484 tcp_highest_sack_replace(sk, skb, nskb);
3485 tcp_rtx_queue_unlink_and_free(skb, sk);
3486 __skb_header_release(nskb);
3487 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3488 sk_wmem_queued_add(sk, nskb->truesize);
3489 sk_mem_charge(sk, nskb->truesize);
3490 skb = nskb;
3491 }
3492
3493 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3494 tcp_ecn_send_synack(sk, skb);
3495 }
3496 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3497}
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
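/* tcp_make_synack - Allocate one skb and build a SYNACK packet.
 * @sk: listener socket
 * @dst: dst entry attached to the SYNACK; it is consumed and the caller
 *       should not use it again
 * @req: request_sock pointer
 * @foc: cookie for TCP Fast Open
 * @synack_type: type of SYNACK to prepare
 * @syn_skb: SYN packet just received (may be NULL for the retransmit case)
 */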
3509struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3510 struct request_sock *req,
3511 struct tcp_fastopen_cookie *foc,
3512 enum tcp_synack_type synack_type,
3513 struct sk_buff *syn_skb)
3514{
3515 struct inet_request_sock *ireq = inet_rsk(req);
3516 const struct tcp_sock *tp = tcp_sk(sk);
3517 struct tcp_md5sig_key *md5 = NULL;
3518 struct tcp_out_options opts;
3519 struct sk_buff *skb;
3520 int tcp_header_size;
3521 struct tcphdr *th;
3522 int mss;
3523 u64 now;
3524
3525 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3526 if (unlikely(!skb)) {
3527 dst_release(dst);
3528 return NULL;
3529 }
3530
3531 skb_reserve(skb, MAX_TCP_HEADER);
3532
3533 switch (synack_type) {
3534 case TCP_SYNACK_NORMAL:
3535 skb_set_owner_w(skb, req_to_sk(req));
3536 break;
3537 case TCP_SYNACK_COOKIE:
3538
3539
3540
3541 break;
3542 case TCP_SYNACK_FASTOPEN:
3543
3544
3545
3546
3547 skb_set_owner_w(skb, (struct sock *)sk);
3548 break;
3549 }
3550 skb_dst_set(skb, dst);
3551
3552 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3553
3554 memset(&opts, 0, sizeof(opts));
3555 now = tcp_clock_ns();
3556#ifdef CONFIG_SYN_COOKIES
3557 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3558 skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
3559 else
3560#endif
3561 {
3562 skb->skb_mstamp_ns = now;
3563 if (!tcp_rsk(req)->snt_synack)
3564 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
3565 }
3566
3567#ifdef CONFIG_TCP_MD5SIG
3568 rcu_read_lock();
3569 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
3570#endif
3571 skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
3572
3573 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
3574 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
3575 foc, synack_type,
3576 syn_skb) + sizeof(*th);
3577
3578 skb_push(skb, tcp_header_size);
3579 skb_reset_transport_header(skb);
3580
3581 th = (struct tcphdr *)skb->data;
3582 memset(th, 0, sizeof(struct tcphdr));
3583 th->syn = 1;
3584 th->ack = 1;
3585 tcp_ecn_make_synack(req, th);
3586 th->source = htons(ireq->ir_num);
3587 th->dest = ireq->ir_rmt_port;
3588 skb->mark = ireq->ir_mark;
3589 skb->ip_summed = CHECKSUM_PARTIAL;
3590 th->seq = htonl(tcp_rsk(req)->snt_isn);
3591
3592 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3593
3594
3595 th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3596 tcp_options_write((__be32 *)(th + 1), NULL, &opts);
3597 th->doff = (tcp_header_size >> 2);
3598 __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3599
3600#ifdef CONFIG_TCP_MD5SIG
3601
3602 if (md5)
3603 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3604 md5, req_to_sk(req), skb);
3605 rcu_read_unlock();
3606#endif
3607
3608 bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3609 synack_type, &opts);
3610
3611 skb->skb_mstamp_ns = now;
3612 tcp_add_tx_delay(skb, tp);
3613
3614 return skb;
3615}
3616EXPORT_SYMBOL(tcp_make_synack);
3617
3618static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
3619{
3620 struct inet_connection_sock *icsk = inet_csk(sk);
3621 const struct tcp_congestion_ops *ca;
3622 u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
3623
3624 if (ca_key == TCP_CA_UNSPEC)
3625 return;
3626
3627 rcu_read_lock();
3628 ca = tcp_ca_find_key(ca_key);
3629 if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
3630 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
3631 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
3632 icsk->icsk_ca_ops = ca;
3633 }
3634 rcu_read_unlock();
3635}
3636
3637
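/* Do all connect socket setups that can be done AF independent. */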
3638static void tcp_connect_init(struct sock *sk)
3639{
3640 const struct dst_entry *dst = __sk_dst_get(sk);
3641 struct tcp_sock *tp = tcp_sk(sk);
3642 __u8 rcv_wscale;
3643 u32 rcv_wnd;
3644
3645
3646
3647
3648 tp->tcp_header_len = sizeof(struct tcphdr);
3649 if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
3650 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
3651
3652#ifdef CONFIG_TCP_MD5SIG
3653 if (tp->af_specific->md5_lookup(sk, sk))
3654 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3655#endif
3656
3657
3658 if (tp->rx_opt.user_mss)
3659 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3660 tp->max_window = 0;
3661 tcp_mtup_init(sk);
3662 tcp_sync_mss(sk, dst_mtu(dst));
3663
3664 tcp_ca_dst_init(sk, dst);
3665
3666 if (!tp->window_clamp)
3667 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
3668 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3669
3670 tcp_initialize_rcv_mss(sk);
3671
3672
3673 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3674 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3675 tp->window_clamp = tcp_full_space(sk);
3676
3677 rcv_wnd = tcp_rwnd_init_bpf(sk);
3678 if (rcv_wnd == 0)
3679 rcv_wnd = dst_metric(dst, RTAX_INITRWND);
3680
3681 tcp_select_initial_window(sk, tcp_full_space(sk),
3682 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
3683 &tp->rcv_wnd,
3684 &tp->window_clamp,
3685 sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
3686 &rcv_wscale,
3687 rcv_wnd);
3688
3689 tp->rx_opt.rcv_wscale = rcv_wscale;
3690 tp->rcv_ssthresh = tp->rcv_wnd;
3691
3692 sk->sk_err = 0;
3693 sock_reset_flag(sk, SOCK_DONE);
3694 tp->snd_wnd = 0;
3695 tcp_init_wl(tp, 0);
3696 tcp_write_queue_purge(sk);
3697 tp->snd_una = tp->write_seq;
3698 tp->snd_sml = tp->write_seq;
3699 tp->snd_up = tp->write_seq;
3700 WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3701
3702 if (likely(!tp->repair))
3703 tp->rcv_nxt = 0;
3704 else
3705 tp->rcv_tstamp = tcp_jiffies32;
3706 tp->rcv_wup = tp->rcv_nxt;
3707 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3708
3709 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3710 inet_csk(sk)->icsk_retransmits = 0;
3711 tcp_clear_retrans(tp);
3712}
3713
3714static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3715{
3716 struct tcp_sock *tp = tcp_sk(sk);
3717 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3718
3719 tcb->end_seq += skb->len;
3720 __skb_header_release(skb);
3721 sk_wmem_queued_add(sk, skb->truesize);
3722 sk_mem_charge(sk, skb->truesize);
3723 WRITE_ONCE(tp->write_seq, tcb->end_seq);
3724 tp->packets_out += tcp_skb_pcount(skb);
3725}
3726
3727
3728
3729
3730
3731
3732
3733
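/* Build and send a SYN with data and a (cached) Fast Open cookie.
 * A data-only packet is queued after the regular SYN so that regular SYNs
 * are retransmitted on timeouts; if the cookie is not cached or an error
 * occurs, fall back to a regular SYN carrying a Fast Open cookie request.
 */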
3734static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3735{
3736 struct tcp_sock *tp = tcp_sk(sk);
3737 struct tcp_fastopen_request *fo = tp->fastopen_req;
3738 int space, err = 0;
3739 struct sk_buff *syn_data;
3740
3741 tp->rx_opt.mss_clamp = tp->advmss;
3742 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3743 goto fallback;
3744
3745
3746
3747
3748
3749 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3750
3751 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3752 MAX_TCP_OPTION_SPACE;
3753
3754 space = min_t(size_t, space, fo->size);
3755
3756
3757 space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3758
3759 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3760 if (!syn_data)
3761 goto fallback;
3762 syn_data->ip_summed = CHECKSUM_PARTIAL;
3763 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
3764 if (space) {
3765 int copied = copy_from_iter(skb_put(syn_data, space), space,
3766 &fo->data->msg_iter);
3767 if (unlikely(!copied)) {
3768 tcp_skb_tsorted_anchor_cleanup(syn_data);
3769 kfree_skb(syn_data);
3770 goto fallback;
3771 }
3772 if (copied != space) {
3773 skb_trim(syn_data, copied);
3774 space = copied;
3775 }
3776 skb_zcopy_set(syn_data, fo->uarg, NULL);
3777 }
3778
3779 if (space == fo->size)
3780 fo->data = NULL;
3781 fo->copied = space;
3782
3783 tcp_connect_queue_skb(sk, syn_data);
3784 if (syn_data->len)
3785 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3786
3787 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3788
3789 syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3790
3791
3792
3793
3794
3795
3796 TCP_SKB_CB(syn_data)->seq++;
3797 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3798 if (!err) {
3799 tp->syn_data = (fo->copied > 0);
3800 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3801 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3802 goto done;
3803 }
3804
3805
3806 __skb_queue_tail(&sk->sk_write_queue, syn_data);
3807 tp->packets_out -= tcp_skb_pcount(syn_data);
3808
3809fallback:
3810
3811 if (fo->cookie.len > 0)
3812 fo->cookie.len = 0;
3813 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3814 if (err)
3815 tp->syn_fastopen = 0;
3816done:
3817 fo->cookie.len = -1;
3818 return err;
3819}
3820
3821
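/* Build a SYN and send it off. */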
3822int tcp_connect(struct sock *sk)
3823{
3824 struct tcp_sock *tp = tcp_sk(sk);
3825 struct sk_buff *buff;
3826 int err;
3827
3828 tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
3829
3830 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3831 return -EHOSTUNREACH;
3832
3833 tcp_connect_init(sk);
3834
3835 if (unlikely(tp->repair)) {
3836 tcp_finish_connect(sk, NULL);
3837 return 0;
3838 }
3839
3840 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3841 if (unlikely(!buff))
3842 return -ENOBUFS;
3843
3844 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3845 tcp_mstamp_refresh(tp);
3846 tp->retrans_stamp = tcp_time_stamp(tp);
3847 tcp_connect_queue_skb(sk, buff);
3848 tcp_ecn_send_syn(sk, buff);
3849 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
3850
3851
3852 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3853 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3854 if (err == -ECONNREFUSED)
3855 return err;
3856
3857
3858
3859
3860 WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3861 tp->pushed_seq = tp->write_seq;
3862 buff = tcp_send_head(sk);
3863 if (unlikely(buff)) {
3864 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3865 tp->pushed_seq = TCP_SKB_CB(buff)->seq;
3866 }
3867 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
3868
3869
3870 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3871 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3872 return 0;
3873}
3874EXPORT_SYMBOL(tcp_connect);
3875
3876
3877
3878
3879
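/* Send out a delayed ack; the caller does the policy checking to see if
 * we should even be here.  See tcp_input.c:tcp_ack_snd_check() for details.
 */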
3880void tcp_send_delayed_ack(struct sock *sk)
3881{
3882 struct inet_connection_sock *icsk = inet_csk(sk);
3883 int ato = icsk->icsk_ack.ato;
3884 unsigned long timeout;
3885
3886 if (ato > TCP_DELACK_MIN) {
3887 const struct tcp_sock *tp = tcp_sk(sk);
3888 int max_ato = HZ / 2;
3889
3890 if (inet_csk_in_pingpong_mode(sk) ||
3891 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3892 max_ato = TCP_DELACK_MAX;
3893
3894
3895
3896
3897
3898
3899
3900 if (tp->srtt_us) {
3901 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3902 TCP_DELACK_MIN);
3903
3904 if (rtt < max_ato)
3905 max_ato = rtt;
3906 }
3907
3908 ato = min(ato, max_ato);
3909 }
3910
3911 ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
3912
3913
3914 timeout = jiffies + ato;
3915
3916
3917 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3918
3919 if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3920 tcp_send_ack(sk);
3921 return;
3922 }
3923
3924 if (!time_before(timeout, icsk->icsk_ack.timeout))
3925 timeout = icsk->icsk_ack.timeout;
3926 }
3927 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3928 icsk->icsk_ack.timeout = timeout;
3929 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3930}
3931
3932
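/* This routine sends an ack and also updates the window. */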
3933void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3934{
3935 struct sk_buff *buff;
3936
 /* If we have been reset, we may not send again. */
3938 if (sk->sk_state == TCP_CLOSE)
3939 return;
3940
3941
3942
3943
3944
3945 buff = alloc_skb(MAX_TCP_HEADER,
3946 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3947 if (unlikely(!buff)) {
3948 struct inet_connection_sock *icsk = inet_csk(sk);
3949 unsigned long delay;
3950
3951 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
3952 if (delay < TCP_RTO_MAX)
3953 icsk->icsk_ack.retry++;
3954 inet_csk_schedule_ack(sk);
3955 icsk->icsk_ack.ato = TCP_ATO_MIN;
3956 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
3957 return;
3958 }
3959
3960
3961 skb_reserve(buff, MAX_TCP_HEADER);
3962 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3963
3964
3965
3966
3967
3968 skb_set_tcp_pure_ack(buff);
3969
3970
3971 __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
3972}
3973EXPORT_SYMBOL_GPL(__tcp_send_ack);
3974
3975void tcp_send_ack(struct sock *sk)
3976{
3977 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3978}
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
3989
3990
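/* This routine sends a packet with an out of date sequence
 * number.  It assumes the other end will try to ack it.
 * Two kinds of zero-length probes are used: SEG.SEQ = SND.UNA to carry
 * the urgent pointer, and SND.UNA - 1 to probe the window.
 */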
3991static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
3992{
3993 struct tcp_sock *tp = tcp_sk(sk);
3994 struct sk_buff *skb;
3995
3996
3997 skb = alloc_skb(MAX_TCP_HEADER,
3998 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3999 if (!skb)
4000 return -1;
4001
4002
4003 skb_reserve(skb, MAX_TCP_HEADER);
4004
4005
4006
4007
4008 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
4009 NET_INC_STATS(sock_net(sk), mib);
4010 return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
4011}
4012
4013
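/* Send an explicit window probe on an established socket; used when
 * TCP repair mode is turned off.
 */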
4014void tcp_send_window_probe(struct sock *sk)
4015{
4016 if (sk->sk_state == TCP_ESTABLISHED) {
4017 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
4018 tcp_mstamp_refresh(tcp_sk(sk));
4019 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4020 }
4021}
4022
4023
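/* Initiate keepalive or window probe from timer. */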
4024int tcp_write_wakeup(struct sock *sk, int mib)
4025{
4026 struct tcp_sock *tp = tcp_sk(sk);
4027 struct sk_buff *skb;
4028
4029 if (sk->sk_state == TCP_CLOSE)
4030 return -1;
4031
4032 skb = tcp_send_head(sk);
4033 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
4034 int err;
4035 unsigned int mss = tcp_current_mss(sk);
4036 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
4037
4038 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
4039 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
4040
4041
4042
4043
4044
4045 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
4046 skb->len > mss) {
4047 seg_size = min(seg_size, mss);
4048 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4049 if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
4050 skb, seg_size, mss, GFP_ATOMIC))
4051 return -1;
4052 } else if (!tcp_skb_pcount(skb))
4053 tcp_set_skb_tso_segs(skb, mss);
4054
4055 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4056 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
4057 if (!err)
4058 tcp_event_new_data_sent(sk, skb);
4059 return err;
4060 } else {
4061 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4062 tcp_xmit_probe_skb(sk, 1, mib);
4063 return tcp_xmit_probe_skb(sk, 0, mib);
4064 }
4065}
4066
4067
4068
4069
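/* A window probe timeout has occurred.  If the window is not closed,
 * send a partial packet, else a zero probe.
 */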
4070void tcp_send_probe0(struct sock *sk)
4071{
4072 struct inet_connection_sock *icsk = inet_csk(sk);
4073 struct tcp_sock *tp = tcp_sk(sk);
4074 struct net *net = sock_net(sk);
4075 unsigned long timeout;
4076 int err;
4077
4078 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
4079
4080 if (tp->packets_out || tcp_write_queue_empty(sk)) {
4081
4082 icsk->icsk_probes_out = 0;
4083 icsk->icsk_backoff = 0;
4084 icsk->icsk_probes_tstamp = 0;
4085 return;
4086 }
4087
4088 icsk->icsk_probes_out++;
4089 if (err <= 0) {
4090 if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
4091 icsk->icsk_backoff++;
4092 timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
4093 } else {
4094
4095
4096
4097 timeout = TCP_RESOURCE_PROBE_INTERVAL;
4098 }
4099
4100 timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
4101 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
4102}
4103
4104int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
4105{
4106 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
4107 struct flowi fl;
4108 int res;
4109
4110 tcp_rsk(req)->txhash = net_tx_rndhash();
4111 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4112 NULL);
4113 if (!res) {
4114 __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
4115 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4116 if (unlikely(tcp_passive_fastopen(sk)))
4117 tcp_sk(sk)->total_retrans++;
4118 trace_tcp_retransmit_synack(sk, req);
4119 }
4120 return res;
4121}
4122EXPORT_SYMBOL(tcp_rtx_synack);
4123