/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/export.h>

#include "dccp.h"

/* sysctl variables governing numbers of retransmission attempts */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
int sysctl_dccp_retries1	__read_mostly = TCP_RETR1;
int sysctl_dccp_retries2	__read_mostly = TCP_RETR2;

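/*
 * Abort the connection: report the pending soft error (or ETIMEDOUT if there
 * is none), send a Reset with code "Aborted" and mark the sock as dead.
 */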
static void dccp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
	dccp_done(sk);
	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
}
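/* A write timeout has occurred. Process the after effects. */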
static int dccp_write_timeout(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;

	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
		if (icsk->icsk_retransmits != 0)
			dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ?
			    : sysctl_dccp_request_retries;
	} else {
		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
			/*
			 * Too many retransmissions on an established
			 * connection: the cached route may have gone stale
			 * (e.g. due to a PMTU black hole), so give the
			 * routing layer negative advice and have a fresh
			 * route looked up before retransmitting further.
			 */
			dst_negative_advice(sk);
		}

		retry_until = sysctl_dccp_retries2;
		/*
		 * FIXME: see tcp_write_timeout and __tcp_retransmit_skb
		 */
	}

	if (icsk->icsk_retransmits >= retry_until) {
		/* Has it gone just too far? */
		dccp_write_err(sk);
		return 1;
	}
	return 0;
}
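/*
 * The DCCP retransmit timer: gives up via dccp_write_timeout() once the
 * retry limit is reached, otherwise retransmits and backs off the RTO.
 */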
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * If the connection has timed out, dccp_write_timeout() has already
	 * sent a Reset and closed the sock: there is nothing to retransmit.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	icsk->icsk_backoff++;

	/* Exponential backoff, bounded by DCCP_RTO_MAX */
	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}

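/*
 * Common timer callback: if the socket is in use by user context, retry in
 * a short while; otherwise handle the pending ICSK timer event.
 */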
static void dccp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		dccp_retransmit_timer(sk);
		break;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */
static void dccp_response_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}
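/*
 * DCCP does not implement keepalives; this timer slot only services LISTEN
 * sockets, pruning stale connection requests from the request queue.
 */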
static void dccp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ / 20);
		goto out;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_response_timer(sk);
		goto out;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/**
 * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
 * @data: Socket to act on
 *
 * See the comments above %ccid_packet_dequeue_eval for supported modes.
 */
static void dccp_write_xmitlet(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk);
	bh_unlock_sock(sk);
}

static void dccp_write_xmit_timer(unsigned long data)
{
	dccp_write_xmitlet(data);
	sock_put((struct sock *)data);
}

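/*
 * Wire up the transmit tasklet, the per-socket CCID transmit-delay timer,
 * and the common inet_connection_sock timers (retransmit/delack/keepalive).
 */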
void dccp_init_xmit_timers(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
		    (unsigned long)sk);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}

static ktime_t dccp_timestamp_seed;

/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is native
 * DCCP time difference format (RFC 4340, sec. 13).
 * Please note: This will wrap around about circa every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);

void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}