/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
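/* enqueue @skb on sk_send_head for retransmission, return clone to send now */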
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}

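/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */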
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
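		/*
		 * Increment GSS here already in anticipation: dccpd_seq holds
		 * the value that GSS will be updated to (via dccp_update_gss()
		 * below) once the header has been built.
		 */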
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
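			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck and Reset packets should come
			 * through here with skb->sk set.
			 */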
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

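		/* Build DCCP header and checksum it. */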
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type = dcb->dccpd_type;
		dh->dccph_sport = inet->inet_sport;
		dh->dccph_dport = inet->inet_dport;
		dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
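			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */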
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}
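/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */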
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;
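	/* Account for header lengths and IPv4/v6 option overhead */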
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));
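	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (eg. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */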
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);
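	/* And store cached results */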
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}
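/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */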
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}
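/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * Pops the next packet from the TX queue and transmits it; during the
 * handshake (PARTOPEN) and when an Ack is pending, it is sent as DataAck.
 */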
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
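		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */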
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
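	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */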
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
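	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */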
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}
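/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */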
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
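			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */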
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}
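/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */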
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;

	/* Resolve feature dependencies of the server CCID before building options */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport = inet_rsk(req)->loc_port;
	dh->dccph_dport = inet_rsk(req)->rmt_port;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESPONSE;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);
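/* build a Reset answering the offending packet in @rcv_skb (ctl socket, no connection state) */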
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_reset_len / 4;
	dh->dccph_x = 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
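	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */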
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}
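/*
 * Do all connect socket setups that can be done AF independent.
 */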
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiet senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
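	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */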
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
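	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */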
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);
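/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to interleave with other non-Close packets.
 */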
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
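		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */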
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}