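/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the entry point to the networking subsystem.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */
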
#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES;
int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;

static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer(unsigned long data);

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
}
EXPORT_SYMBOL(tcp_init_xmit_timers);

static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_done(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

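/* Do not allow orphaned sockets to eat all our resources. This is a
 * direct violation of the TCP spec, but it is required to prevent DoS
 * attacks. It is called when a retransmission timeout or zero-window
 * probe timeout occurs on an orphaned socket, and kills the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */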
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

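	/* If peer does not open window for a long time, or did not
	 * transmit anything for a long time, penalize it.
	 */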
	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

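	/* If some dubious ICMP arrived, penalize even more. */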
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
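		/* Catch exceptional cases, when connection requires reset.
		 * 1. Last segment was sent recently.
		 */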
		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
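		    /* 2. Window is closed. */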
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = 1;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}
	return 0;
}

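/* Calculate the maximal number of retries on an orphaned socket. */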
static int tcp_orphan_retries(struct sock *sk, int alive)
{
	int retries = sysctl_tcp_orphan_retries; /* May be zero. */

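	/* We know from an ICMP that something is wrong. */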
	if (sk->sk_err_soft && !alive)
		retries = 0;

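	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with the minimal
	 * RTO of 200 msec.
	 */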
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
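	/* Black hole detection */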
	if (sysctl_tcp_mtu_probing) {
		if (!icsk->icsk_mtup.enabled) {
			icsk->icsk_mtup.enabled = 1;
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);
			int mss;

			mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
			mss = min(sysctl_tcp_base_mss, mss);
			mss = max(mss, 68 - tp->tcp_header_len);
			icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		}
	}
}

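/* This function calculates a "timeout" which is equivalent to the timeout
 * of a TCP connection after "boundary" unsuccessful, exponentially
 * backed-off retransmissions with an initial RTO of TCP_RTO_MIN, or of
 * TCP_TIMEOUT_INIT if the syn_set flag is set.
 *
 * Worked example, assuming the usual TCP_RTO_MIN of 200 ms and TCP_RTO_MAX
 * of 120 s: linear_backoff_thresh = ilog2(120000/200) = ilog2(600) = 9, so
 * for boundary = 15 (the default tcp_retries2) the timeout is
 * ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s ~= 924.6 s.
 */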
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout,
				  bool syn_set)
{
	unsigned int linear_backoff_thresh, start_ts;
	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	if (unlikely(!tcp_sk(sk)->retrans_stamp))
		start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
	else
		start_ts = tcp_sk(sk)->retrans_stamp;

	if (likely(timeout == 0)) {
		linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

		if (boundary <= linear_backoff_thresh)
			timeout = ((2 << boundary) - 1) * rto_base;
		else
			timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
				(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	}
	return (tcp_time_stamp - start_ts) >= timeout;
}

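/* A write timeout has occurred. Process the after effects. */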
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;
	bool do_reset, syn_set = false;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		syn_set = true;
	} else {
		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
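			/* Black hole detection */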
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		}

		retry_until = sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const int alive = (icsk->icsk_rto < TCP_RTO_MAX);

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				   !retransmits_timed_out(sk, retry_until, 0, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}

	if (retransmits_timed_out(sk, retry_until,
				  syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
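		/* Has it gone just too far? */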
		tcp_write_err(sk);
		return 1;
	}
	return 0;
}

static void tcp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
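		/* Try again later. */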
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
		goto out_unlock;
	}

	sk_mem_reclaim_partial(sk);

	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
		struct sk_buff *skb;

		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);

		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb);

		tp->ucopy.memory = 0;
	}

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
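			/* Delayed ACK missed: inflate ATO. */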
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
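			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */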
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (sk_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;
		return;
	}

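	/* This does not violate RFC 1122: icsk_probes_out is zeroed by
	 * incoming ACKs even if they advertise a zero window, so the
	 * connection is killed only when we have received no ACKs for the
	 * whole connection timeout, not merely because the window has
	 * stayed zero for a while; the window may stay zero indefinitely.
	 * The probe timer simply combines the retransmission timeout and
	 * the probe timeout in one mechanism.
	 */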
	max_probes = sysctl_tcp_retries2;

	if (sock_flag(sk, SOCK_DEAD)) {
		const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);

		max_probes = tcp_orphan_retries(sk, alive);

		if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
			return;
	}

	if (icsk->icsk_probes_out > max_probes) {
		tcp_write_err(sk);
	} else {
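		/* Only send another probe if we didn't close things up. */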
		tcp_send_probe0(sk);
	}
}

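/*
 *	The TCP retransmit timer.
 */
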
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out)
		goto out;

	WARN_ON(tcp_write_queue_empty(sk));

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
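		/* The receiver has shrunk the window, so our retransmits
		 * have become zero-window probes. Do not time out the
		 * connection unless the socket is an orphan; we cannot
		 * allow such sockets to hang around forever.
		 */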
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
				       &inet->inet_daddr, ntohs(inet->inet_dport),
				       inet->inet_num, tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
				       &np->daddr, ntohs(inet->inet_dport),
				       inet->inet_num, tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk, 0);
		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		} else {
			mib_idx = LINUX_MIB_TCPTIMEOUTS;
		}
		NET_INC_STATS_BH(sock_net(sk), mib_idx);
	}

	if (tcp_use_frto(sk)) {
		tcp_enter_frto(sk);
	} else {
		tcp_enter_loss(sk, 0);
	}

	if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
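		/* Retransmission failed because of local congestion,
		 * do not back off.
		 */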
		if (!icsk->icsk_retransmits)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
					  TCP_RTO_MAX);
		goto out;
	}

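	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the RTT estimate: the RTO is initialized
	 * from the RTT but grows only here.  Doubling the RTO on every
	 * retransmission (Jacobson, SIGCOMM '88) is the least aggressive
	 * backoff we can get away with; it is clamped at TCP_RTO_MAX below.
	 */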
	icsk->icsk_backoff++;
	icsk->icsk_retransmits++;

out_reset_timer:
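	/* If the stream is thin, use linear timeouts instead of exponential
	 * backoff for the first TCP_THIN_LINEAR_RETRIES retransmissions.
	 * Since 'icsk_backoff' is used to reset the timer, set it to 0 and
	 * recalculate 'icsk_rto' from the current RTT estimate, bounded by
	 * TCP_RTO_MAX. Otherwise the usual exponential backoff applies.
	 */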
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
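		/* Use normal (exponential) backoff. */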
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
	if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
		__sk_dst_reset(sk);

out:;
}

static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
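		/* Try again later. */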
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
		goto out_unlock;
	}

	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}

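/*
 *	Timer for listening sockets
 */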
static void tcp_synack_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}

void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}

static void tcp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *) data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

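	/* Only process if socket is not in use. */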
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
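		/* Try again later. */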
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		tcp_synack_timer(sk);
		goto out;
	}

	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
		goto out;

	elapsed = keepalive_time_when(tp);

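	/* It is alive without keepalive. */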
	if (tp->packets_out || tcp_send_head(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
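		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */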
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= icsk->icsk_user_timeout &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
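			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */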
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
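		/* Timer fires roughly at rcv_tstamp + keepalive_time_when(tp). */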
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}