1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include <linux/module.h>
29#include <linux/errno.h>
30#include <linux/types.h>
31#include <linux/socket.h>
32#include <linux/sockios.h>
33#include <linux/net.h>
34#include <linux/jiffies.h>
35#include <linux/in.h>
36#include <linux/in6.h>
37#include <linux/netdevice.h>
38#include <linux/init.h>
39#include <linux/jhash.h>
40#include <linux/ipsec.h>
41#include <linux/times.h>
42
43#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
49#include <net/inet6_hashtables.h>
50#include <net/inet6_connection_sock.h>
51#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
59#include <net/snmp.h>
60#include <net/dsfield.h>
61#include <net/timewait_sock.h>
62#include <net/netdma.h>
63
64#include <asm/uaccess.h>
65
66#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
69#include <linux/crypto.h>
70#include <linux/scatterlist.h>
71
72
73static struct socket *tcp6_socket;
74
75static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77static void tcp_v6_send_check(struct sock *sk, int len,
78 struct sk_buff *skb);
79
80static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
81
82static struct inet_connection_sock_af_ops ipv6_mapped;
83static struct inet_connection_sock_af_ops ipv6_specific;
84#ifdef CONFIG_TCP_MD5SIG
85static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87#endif
88
89static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
90{
91 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
92 inet6_csk_bind_conflict);
93}
94
95static void tcp_v6_hash(struct sock *sk)
96{
97 if (sk->sk_state != TCP_CLOSE) {
98 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
99 tcp_prot.hash(sk);
100 return;
101 }
102 local_bh_disable();
103 __inet6_hash(&tcp_hashinfo, sk);
104 local_bh_enable();
105 }
106}
107
108static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
109 struct in6_addr *saddr,
110 struct in6_addr *daddr,
111 __wsum base)
112{
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
114}
115
116static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
117{
118 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122}
123
/*
 * Establish an active TCP connection over IPv6 to the address in @uaddr.
 * Handles flow labels, scope-id validation for link-local destinations,
 * and transparently falls back to the IPv4 connect path for v4-mapped
 * addresses.  Returns 0 on success or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			/* The flow label selects the destination address. */
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If the socket is already bound to a device, the
			 * caller's scope id must name the same device.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface. */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		/* Different peer than last time: drop cached timestamp
		 * state and let a fresh ISN be chosen below.
		 */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * TCP over IPv4: a v4-mapped destination is handed to the IPv4
	 * connect path with the af_ops switched to the mapped variants.
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Connect failed: restore the IPv6 operations. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			/* Mirror the chosen IPv4 addresses as v4-mapped. */
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		/* Source routing: route via the first hop of the routing
		 * header, remembering the real destination in @final.
		 */
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		/* No bound source address: use what the route chose. */
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
321
/*
 * ICMPv6 error handler for TCP.  Looks up the socket the quoted segment
 * belongs to, performs PMTU discovery for ICMPV6_PKT_TOOBIG, drops
 * affected embryonic connection requests, and reports hard errors to
 * connecting sockets.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* Socket busy in user context: only account the event here. */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* Discard errors quoting a sequence outside the send window. */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* Re-validate the cached route before reading its MTU. */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* Cached route gone: rebuild the flow from socket
			 * state and look it up again.  NOTE(review): this
			 * path does not account for any routing header the
			 * socket may use.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			/* Path MTU shrank: resync MSS and retransmit. */
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		}
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* The error might be for an embryonic request sock. */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get a lock on
		 * this request sock; it must still be embryonic here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Hard error during the handshake kills the connection. */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* wake up waiters */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
462
463
/*
 * Build and transmit a SYN|ACK for the pending request @req.  When
 * @dst is NULL the route is looked up here, honouring any source
 * routing option on the listening socket.  Consumes a reference on
 * @dst.  Returns a net_xmit code or a negative errno.
 */
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (dst == NULL) {
		opt = np->opt;
		if (opt && opt->srcrt) {
			/* Route via the routing header's first hop; the
			 * real destination is restored after the lookup.
			 */
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		/* Finalize the TCP checksum over the whole SYN|ACK. */
		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
522
523static void tcp_v6_reqsk_destructor(struct request_sock *req)
524{
525 if (inet6_rsk(req)->pktopts)
526 kfree_skb(inet6_rsk(req)->pktopts);
527}
528
529#ifdef CONFIG_TCP_MD5SIG
530static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
531 struct in6_addr *addr)
532{
533 struct tcp_sock *tp = tcp_sk(sk);
534 int i;
535
536 BUG_ON(tp == NULL);
537
538 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
539 return NULL;
540
541 for (i = 0; i < tp->md5sig_info->entries6; i++) {
542 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
543 return &tp->md5sig_info->keys6[i].base;
544 }
545 return NULL;
546}
547
548static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
549 struct sock *addr_sk)
550{
551 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552}
553
554static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
555 struct request_sock *req)
556{
557 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558}
559
/*
 * Add or replace the MD5 signature key for peer @peer on socket @sk.
 * Takes ownership of the kmalloc'd @newkey and frees it on failure.
 * Returns 0 or -ENOMEM.
 */
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* Entry exists: just swap in the new key material. */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* First key on this socket: allocate the info block. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			/* MD5 signing is incompatible with GSO. */
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		/* Grow the key array by one slot when it is full. */
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
617
618static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
619 u8 *newkey, __u8 newkeylen)
620{
621 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
622 newkey, newkeylen);
623}
624
/*
 * Remove the MD5 key for peer @peer.  Compacts the key array in place
 * and frees it entirely when the last IPv6 entry goes away.  Returns
 * 0 on success or -ENOENT if no key exists for @peer.
 */
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
			/* Free the key material itself. */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* Shrink the database: close the gap. */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
654
/*
 * Free every MD5 key (both the IPv6 and the IPv4 tables) held by @sk
 * and drop the crypto-pool references that backed them.
 */
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
682
/*
 * setsockopt(TCP_MD5SIG) handler: install or delete the MD5 key for
 * the peer address in @optval.  A zero key length means delete; a
 * v4-mapped peer is routed to the IPv4 key table.
 */
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length: delete the existing key. */
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		/* MD5 signing is incompatible with GSO. */
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	/* The *_do_add() helpers take ownership of newkey. */
	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
731
/*
 * Compute the TCP MD5 signature (RFC 2385) into @md5_hash (16 bytes).
 * Hashes, in order: the IPv6 pseudo-header, the fixed TCP header with
 * its checksum field zeroed (options excluded), the segment payload,
 * and the key, using the per-CPU crypto pool.  On any failure the
 * output hash is zeroed.  Always returns 0.
 */
static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   struct tcphdr *th, int protocol,
				   int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 cksum;
	struct tcp_md5sig_pool *hp;
	struct tcp6_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	hp = tcp_get_md5sig_pool();
	if (!hp) {
		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
		goto clear_hash_noput;
	}
	bp = &hp->md5_blk.ip6;
	desc = &hp->md5_desc;

	/* 1. the TCP pseudo-header */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->len = htonl(tcplen);
	bp->protocol = htonl(protocol);

	sg_init_table(sg, 4);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. the fixed TCP header; the checksum field is zeroed for
	 * the computation and restored afterwards.
	 */
	cksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. the TCP segment data, if any */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. the shared secret key */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	sg_mark_end(&sg[block - 1]);

	/* Run the digest over the assembled scatterlist. */
	err = crypto_hash_init(desc);
	if (err) {
		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
		goto clear_hash;
	}

	/* Release the pool and restore the saved checksum. */
	tcp_put_md5sig_pool();
	th->check = cksum;
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}
815
816static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
817 struct sock *sk,
818 struct dst_entry *dst,
819 struct request_sock *req,
820 struct tcphdr *th, int protocol,
821 int tcplen)
822{
823 struct in6_addr *saddr, *daddr;
824
825 if (sk) {
826 saddr = &inet6_sk(sk)->saddr;
827 daddr = &inet6_sk(sk)->daddr;
828 } else {
829 saddr = &inet6_rsk(req)->loc_addr;
830 daddr = &inet6_rsk(req)->rmt_addr;
831 }
832 return tcp_v6_do_calc_md5_hash(md5_hash, key,
833 saddr, daddr,
834 th, protocol, tcplen);
835}
836
/*
 * Verify the TCP MD5 signature option on an inbound segment against
 * the key configured for the source address.  Returns 1 to drop the
 * segment (unexpected, missing or bad hash) and 0 to accept it.
 */
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int length = (th->doff << 2) - sizeof (*th);
	int genhash;
	u8 *ptr;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);

	/* Option area too small to hold an MD5 option: short cut. */
	if (length < TCPOLEN_MD5SIG)
		return hash_expected ? 1 : 0;

	/* Scan the TCP options for an MD5 signature. */
	ptr = (u8*)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch(opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				goto done_opts;
			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize - 2;
		length -= opsize;
	}

done_opts:
	/* Presence of the option must match whether a key is set. */
	if (!hash_expected) {
		if (!hash_location)
			return 0;
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	if (!hash_location) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	/* Recompute the signature and compare. */
	genhash = tcp_v6_do_calc_md5_hash(newhash,
					  hash_expected,
					  &ip6h->saddr, &ip6h->daddr,
					  th, sk->sk_protocol,
					  skb->len);
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
924#endif
925
/* Request-sock operations for IPv6 TCP: SYN|ACK retransmission,
 * ACK/RST generation and per-request cleanup.
 */
static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family = AF_INET6,
	.obj_size = sizeof(struct tcp6_request_sock),
	.rtx_syn_ack = tcp_v6_send_synack,
	.send_ack = tcp_v6_reqsk_send_ack,
	.destructor = tcp_v6_reqsk_destructor,
	.send_reset = tcp_v6_send_reset
};
934
935#ifdef CONFIG_TCP_MD5SIG
/* MD5 hooks used while a connection request is still embryonic. */
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup = tcp_v6_reqsk_md5_lookup,
};
939#endif
940
/* Timewait-sock operations for IPv6 TCP. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
	.twsk_unique = tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
946
947static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
948{
949 struct ipv6_pinfo *np = inet6_sk(sk);
950 struct tcphdr *th = tcp_hdr(skb);
951
952 if (skb->ip_summed == CHECKSUM_PARTIAL) {
953 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
954 skb->csum_start = skb_transport_header(skb) - skb->head;
955 skb->csum_offset = offsetof(struct tcphdr, check);
956 } else {
957 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
958 csum_partial((char *)th, th->doff<<2,
959 skb->csum));
960 }
961}
962
963static int tcp_v6_gso_send_check(struct sk_buff *skb)
964{
965 struct ipv6hdr *ipv6h;
966 struct tcphdr *th;
967
968 if (!pskb_may_pull(skb, sizeof(*th)))
969 return -EINVAL;
970
971 ipv6h = ipv6_hdr(skb);
972 th = tcp_hdr(skb);
973
974 th->check = 0;
975 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
976 IPPROTO_TCP, 0);
977 skb->csum_start = skb_transport_header(skb) - skb->head;
978 skb->csum_offset = offsetof(struct tcphdr, check);
979 skb->ip_summed = CHECKSUM_PARTIAL;
980 return 0;
981}
982
983static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
984{
985 struct tcphdr *th = tcp_hdr(skb), *t1;
986 struct sk_buff *buff;
987 struct flowi fl;
988 int tot_len = sizeof(*th);
989#ifdef CONFIG_TCP_MD5SIG
990 struct tcp_md5sig_key *key;
991#endif
992
993 if (th->rst)
994 return;
995
996 if (!ipv6_unicast_destination(skb))
997 return;
998
999#ifdef CONFIG_TCP_MD5SIG
1000 if (sk)
1001 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1002 else
1003 key = NULL;
1004
1005 if (key)
1006 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1007#endif
1008
1009
1010
1011
1012
1013
1014 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1015 GFP_ATOMIC);
1016 if (buff == NULL)
1017 return;
1018
1019 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1020
1021 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1022
1023
1024 memset(t1, 0, sizeof(*t1));
1025 t1->dest = th->source;
1026 t1->source = th->dest;
1027 t1->doff = tot_len / 4;
1028 t1->rst = 1;
1029
1030 if(th->ack) {
1031 t1->seq = th->ack_seq;
1032 } else {
1033 t1->ack = 1;
1034 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1035 + skb->len - (th->doff<<2));
1036 }
1037
1038#ifdef CONFIG_TCP_MD5SIG
1039 if (key) {
1040 __be32 *opt = (__be32*)(t1 + 1);
1041 opt[0] = htonl((TCPOPT_NOP << 24) |
1042 (TCPOPT_NOP << 16) |
1043 (TCPOPT_MD5SIG << 8) |
1044 TCPOLEN_MD5SIG);
1045 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1046 &ipv6_hdr(skb)->daddr,
1047 &ipv6_hdr(skb)->saddr,
1048 t1, IPPROTO_TCP, tot_len);
1049 }
1050#endif
1051
1052 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1053
1054 memset(&fl, 0, sizeof(fl));
1055 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1056 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1057
1058 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1059 sizeof(*t1), IPPROTO_TCP,
1060 buff->csum);
1061
1062 fl.proto = IPPROTO_TCP;
1063 fl.oif = inet6_iif(skb);
1064 fl.fl_ip_dport = t1->dest;
1065 fl.fl_ip_sport = t1->source;
1066 security_skb_classify_flow(skb, &fl);
1067
1068
1069 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1070
1071 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1072 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1073 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1074 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1075 return;
1076 }
1077 }
1078
1079 kfree_skb(buff);
1080}
1081
1082static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1083 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1084{
1085 struct tcphdr *th = tcp_hdr(skb), *t1;
1086 struct sk_buff *buff;
1087 struct flowi fl;
1088 int tot_len = sizeof(struct tcphdr);
1089 __be32 *topt;
1090#ifdef CONFIG_TCP_MD5SIG
1091 struct tcp_md5sig_key *key;
1092 struct tcp_md5sig_key tw_key;
1093#endif
1094
1095#ifdef CONFIG_TCP_MD5SIG
1096 if (!tw && skb->sk) {
1097 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1098 } else if (tw && tw->tw_md5_keylen) {
1099 tw_key.key = tw->tw_md5_key;
1100 tw_key.keylen = tw->tw_md5_keylen;
1101 key = &tw_key;
1102 } else {
1103 key = NULL;
1104 }
1105#endif
1106
1107 if (ts)
1108 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1109#ifdef CONFIG_TCP_MD5SIG
1110 if (key)
1111 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1112#endif
1113
1114 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1115 GFP_ATOMIC);
1116 if (buff == NULL)
1117 return;
1118
1119 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1120
1121 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1122
1123
1124 memset(t1, 0, sizeof(*t1));
1125 t1->dest = th->source;
1126 t1->source = th->dest;
1127 t1->doff = tot_len/4;
1128 t1->seq = htonl(seq);
1129 t1->ack_seq = htonl(ack);
1130 t1->ack = 1;
1131 t1->window = htons(win);
1132
1133 topt = (__be32 *)(t1 + 1);
1134
1135 if (ts) {
1136 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1137 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1138 *topt++ = htonl(tcp_time_stamp);
1139 *topt = htonl(ts);
1140 }
1141
1142#ifdef CONFIG_TCP_MD5SIG
1143 if (key) {
1144 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1145 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1146 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1147 &ipv6_hdr(skb)->daddr,
1148 &ipv6_hdr(skb)->saddr,
1149 t1, IPPROTO_TCP, tot_len);
1150 }
1151#endif
1152
1153 buff->csum = csum_partial((char *)t1, tot_len, 0);
1154
1155 memset(&fl, 0, sizeof(fl));
1156 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1157 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1158
1159 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1160 tot_len, IPPROTO_TCP,
1161 buff->csum);
1162
1163 fl.proto = IPPROTO_TCP;
1164 fl.oif = inet6_iif(skb);
1165 fl.fl_ip_dport = t1->dest;
1166 fl.fl_ip_sport = t1->source;
1167 security_skb_classify_flow(skb, &fl);
1168
1169 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1170 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1171 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1172 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1173 return;
1174 }
1175 }
1176
1177 kfree_skb(buff);
1178}
1179
1180static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1181{
1182 struct inet_timewait_sock *tw = inet_twsk(sk);
1183 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1184
1185 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1186 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1187 tcptw->tw_ts_recent);
1188
1189 inet_twsk_put(tw);
1190}
1191
1192static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1193{
1194 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1195}
1196
1197
/*
 * Demultiplex a segment arriving on listening socket @sk: check the
 * pending-request queue first, then the established/timewait tables
 * for a child created by an earlier SYN|ACK.  Returns the socket to
 * process the segment on (locked if it is a newly found child), or
 * NULL when the segment should be dropped.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find a possible connection request. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr,
					 th->source, &ipv6_hdr(skb)->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	/* IPv6 syncookie handling is compiled out. */
#if 0
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
1230
1231
1232
1233
/*
 * Handle an incoming SYN on a listening socket: allocate a request
 * sock, record the peer's addresses and options, pick an initial
 * sequence number, and answer with a SYN|ACK.  v4-mapped traffic is
 * diverted to the IPv4 handler.  Always returns 0 — the segment is
 * consumed either way.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* No syncookie support on this path: a full request queue
	 * simply drops new SYNs (rate-limited warning only).
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	TCP_ECN_create_request(req, tcp_hdr(skb));
	treq->pktopts = NULL;
	/* Keep a reference to the SYN skb when the application asked
	 * for any ancillary-data options, so they can be replayed.
	 */
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning. */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(skb);

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0;
}
1315
1316static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1317 struct request_sock *req,
1318 struct dst_entry *dst)
1319{
1320 struct inet6_request_sock *treq = inet6_rsk(req);
1321 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1322 struct tcp6_sock *newtcp6sk;
1323 struct inet_sock *newinet;
1324 struct tcp_sock *newtp;
1325 struct sock *newsk;
1326 struct ipv6_txoptions *opt;
1327#ifdef CONFIG_TCP_MD5SIG
1328 struct tcp_md5sig_key *key;
1329#endif
1330
1331 if (skb->protocol == htons(ETH_P_IP)) {
1332
1333
1334
1335
1336 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1337
1338 if (newsk == NULL)
1339 return NULL;
1340
1341 newtcp6sk = (struct tcp6_sock *)newsk;
1342 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1343
1344 newinet = inet_sk(newsk);
1345 newnp = inet6_sk(newsk);
1346 newtp = tcp_sk(newsk);
1347
1348 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1349
1350 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1351 newinet->daddr);
1352
1353 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1354 newinet->saddr);
1355
1356 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1357
1358 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1359 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1360#ifdef CONFIG_TCP_MD5SIG
1361 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1362#endif
1363
1364 newnp->pktoptions = NULL;
1365 newnp->opt = NULL;
1366 newnp->mcast_oif = inet6_iif(skb);
1367 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1380
1381 return newsk;
1382 }
1383
1384 opt = np->opt;
1385
1386 if (sk_acceptq_is_full(sk))
1387 goto out_overflow;
1388
1389 if (dst == NULL) {
1390 struct in6_addr *final_p = NULL, final;
1391 struct flowi fl;
1392
1393 memset(&fl, 0, sizeof(fl));
1394 fl.proto = IPPROTO_TCP;
1395 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1396 if (opt && opt->srcrt) {
1397 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1398 ipv6_addr_copy(&final, &fl.fl6_dst);
1399 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1400 final_p = &final;
1401 }
1402 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1403 fl.oif = sk->sk_bound_dev_if;
1404 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1405 fl.fl_ip_sport = inet_sk(sk)->sport;
1406 security_req_classify_flow(req, &fl);
1407
1408 if (ip6_dst_lookup(sk, &dst, &fl))
1409 goto out;
1410
1411 if (final_p)
1412 ipv6_addr_copy(&fl.fl6_dst, final_p);
1413
1414 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1415 goto out;
1416 }
1417
1418 newsk = tcp_create_openreq_child(sk, req, skb);
1419 if (newsk == NULL)
1420 goto out;
1421
1422
1423
1424
1425
1426
1427
1428 newsk->sk_gso_type = SKB_GSO_TCPV6;
1429 __ip6_dst_store(newsk, dst, NULL, NULL);
1430
1431 newtcp6sk = (struct tcp6_sock *)newsk;
1432 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1433
1434 newtp = tcp_sk(newsk);
1435 newinet = inet_sk(newsk);
1436 newnp = inet6_sk(newsk);
1437
1438 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1439
1440 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1441 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1442 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1443 newsk->sk_bound_dev_if = treq->iif;
1444
1445
1446
1447
1448
1449 newinet->opt = NULL;
1450 newnp->ipv6_fl_list = NULL;
1451
1452
1453 newnp->rxopt.all = np->rxopt.all;
1454
1455
1456 newnp->pktoptions = NULL;
1457 if (treq->pktopts != NULL) {
1458 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1459 kfree_skb(treq->pktopts);
1460 treq->pktopts = NULL;
1461 if (newnp->pktoptions)
1462 skb_set_owner_r(newnp->pktoptions, newsk);
1463 }
1464 newnp->opt = NULL;
1465 newnp->mcast_oif = inet6_iif(skb);
1466 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1467
1468
1469
1470
1471
1472
1473
1474 if (opt) {
1475 newnp->opt = ipv6_dup_options(newsk, opt);
1476 if (opt != np->opt)
1477 sock_kfree_s(sk, opt, opt->tot_len);
1478 }
1479
1480 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1481 if (newnp->opt)
1482 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1483 newnp->opt->opt_flen);
1484
1485 tcp_mtup_init(newsk);
1486 tcp_sync_mss(newsk, dst_mtu(dst));
1487 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1488 tcp_initialize_rcv_mss(newsk);
1489
1490 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1491
1492#ifdef CONFIG_TCP_MD5SIG
1493
1494 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1495
1496
1497
1498
1499
1500 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1501 if (newkey != NULL)
1502 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1503 newkey, key->keylen);
1504 }
1505#endif
1506
1507 __inet6_hash(&tcp_hashinfo, newsk);
1508 inet_inherit_port(&tcp_hashinfo, sk, newsk);
1509
1510 return newsk;
1511
1512out_overflow:
1513 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1514out:
1515 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1516 if (opt && opt != np->opt)
1517 sock_kfree_s(sk, opt, opt->tot_len);
1518 dst_release(dst);
1519 return NULL;
1520}
1521
/*
 * Validate or seed the checksum of an incoming IPv6 TCP segment.
 * Returns 0 when the checksum is verified (or deferred for later
 * completion), nonzero on a hard checksum failure.
 */
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Hardware summed the payload: fold in the pseudo-header. */
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	/* Seed skb->csum with the pseudo-header sum for later completion. */
	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	/* NOTE(review): 76 appears to be a "verify small packets now"
	 * paranoia threshold — confirm against the IPv4 twin. */
	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
1541
1542
1543
1544
1545
1546
1547
1548
1549
/*
 * Main receive handler for an IPv6 TCP socket, also used as the
 * backlog-receive callback (see tcpv6_prot).  Called with the socket
 * locked by the caller.  Returns 0 on every path here: the skb is
 * always consumed, queued, or answered with a reset.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/*
	 * Imagine: the socket is IPv6, an IPv4 packet arrives for its
	 * v4-mapped connection and is backlogged by the IPv4 path; from
	 * the backlog it lands here.  Hand it back to the IPv4 handler.
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	/* Drop segments that fail the TCP MD5 signature check. */
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * Clone the skb up front if the user requested any IPv6 packet
	 * info options; once the segment is accepted in sequence the
	 * clone is stashed in np->pktoptions (ipv6_pktoptions below).
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is
		 * active, otherwise we just shortcircuit this and
		 * continue with the listener.
		 */
		if(nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/*
	 * The segment was accepted: record the requested ancillary data
	 * (incoming interface, hop limit, pktoptions) for recvmsg — but
	 * only while the clone still describes the data ending at
	 * rcv_nxt and the socket is in a data-carrying state.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	/* Free whatever skb was displaced from np->pktoptions (or the
	 * unused clone). */
	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
1676
/*
 * Entry point for every incoming IPv6 TCP segment (registered via
 * tcpv6_protocol below).  Validates the header and checksum, looks up
 * the owning socket, and either processes the segment immediately,
 * prequeues it, or backlogs it when the socket is owned by a user
 * context.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* Data offset must cover at least the basic header... */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	/* ...and the full header including options must be linear. */
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	/* Refresh the header pointer (skb data may have moved). */
	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq counts SYN and FIN as one sequence unit each. */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source,
			    &ipv6_hdr(skb)->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		/* Socket held by a user context: defer to the backlog. */
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		/* Valid segment, no socket: answer with a reset. */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 * Discard frame.
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may revive the port on a live listener. */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
1816
/*
 * Peer timestamp remembering (for TIME-WAIT recycling) is not
 * implemented for IPv6; always report "nothing stored".
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
1822
/* AF-specific connection ops for native IPv6 TCP sockets. */
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	= inet6_csk_xmit,
	.send_check	= tcp_v6_send_check,
	.rebuild_header	= inet6_sk_rebuild_header,
	.conn_request	= tcp_v6_conn_request,
	.syn_recv_sock	= tcp_v6_syn_recv_sock,
	.remember_stamp	= tcp_v6_remember_stamp,
	.net_header_len	= sizeof(struct ipv6hdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1840
#ifdef CONFIG_TCP_MD5SIG
/* MD5-signature ops for native IPv6 TCP sockets. */
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_calc_md5_hash,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
1849
1850
1851
1852
1853
/*
 * AF-specific ops for TCP-over-IPv4 carried on an AF_INET6 socket
 * (v4-mapped addresses): IPv4 transmit/header handling, IPv6
 * sockopt/sockaddr handling.
 */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= tcp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= tcp_v6_conn_request,
	.syn_recv_sock	= tcp_v6_syn_recv_sock,
	.remember_stamp	= tcp_v4_remember_stamp,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
1871
#ifdef CONFIG_TCP_MD5SIG
/* MD5-signature ops for v4-mapped sockets: IPv4 hashing, IPv6 key parsing. */
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_calc_md5_hash,
	.md5_add	= tcp_v6_md5_add_func,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
1880
1881
1882
1883
/*
 * Per-socket initialization for a new IPv6 TCP socket (proto->init
 * callback in tcpv6_prot).  Always returns 0.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/*
	 * So many TCP implementations out there (incorrectly) count
	 * the initial SYN frame in their delayed-ACK and congestion
	 * control algorithms that we must have the following bandaid
	 * to talk efficiently to them.
	 */
	tp->snd_cwnd = 2;

	/*
	 * See draft-stevens-tcpca-spec-01 for a discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;	/* "infinity" */
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;	/* conservative initial MSS */

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
1931
/*
 * Tear down an IPv6 TCP socket: release MD5 keys, then the shared
 * TCP state, then the IPv6-specific state.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any. */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1942
1943#ifdef CONFIG_PROC_FS
1944
/*
 * Format one SYN_RECV request as a /proc/net/tcp6 row.  Several
 * columns are fixed constants because a request_sock has no queues,
 * timers or inode of its own.
 */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;	/* time until the request expires */
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1975
/*
 * Format one listening/established TCP socket as a /proc/net/tcp6 row.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	/* Encode which timer is pending: 1 = retransmit, 4 = zero-window
	 * probe, 2 = sk_timer (presumably keepalive — confirm), 0 = none. */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   /* listeners report the accept backlog instead of rx queue */
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
		   );
}
2030
2031static void get_timewait6_sock(struct seq_file *seq,
2032 struct inet_timewait_sock *tw, int i)
2033{
2034 struct in6_addr *dest, *src;
2035 __u16 destp, srcp;
2036 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2037 int ttd = tw->tw_ttd - jiffies;
2038
2039 if (ttd < 0)
2040 ttd = 0;
2041
2042 dest = &tw6->tw_v6_daddr;
2043 src = &tw6->tw_v6_rcv_saddr;
2044 destp = ntohs(tw->tw_dport);
2045 srcp = ntohs(tw->tw_sport);
2046
2047 seq_printf(seq,
2048 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2049 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2050 i,
2051 src->s6_addr32[0], src->s6_addr32[1],
2052 src->s6_addr32[2], src->s6_addr32[3], srcp,
2053 dest->s6_addr32[0], dest->s6_addr32[1],
2054 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2055 tw->tw_substate, 0, 0,
2056 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2057 atomic_read(&tw->tw_refcnt), tw);
2058}
2059
2060static int tcp6_seq_show(struct seq_file *seq, void *v)
2061{
2062 struct tcp_iter_state *st;
2063
2064 if (v == SEQ_START_TOKEN) {
2065 seq_puts(seq,
2066 " sl "
2067 "local_address "
2068 "remote_address "
2069 "st tx_queue rx_queue tr tm->when retrnsmt"
2070 " uid timeout inode\n");
2071 goto out;
2072 }
2073 st = seq->private;
2074
2075 switch (st->state) {
2076 case TCP_SEQ_STATE_LISTENING:
2077 case TCP_SEQ_STATE_ESTABLISHED:
2078 get_tcp6_sock(seq, v, st->num);
2079 break;
2080 case TCP_SEQ_STATE_OPENREQ:
2081 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2082 break;
2083 case TCP_SEQ_STATE_TIME_WAIT:
2084 get_timewait6_sock(seq, v, st->num);
2085 break;
2086 }
2087out:
2088 return 0;
2089}
2090
/* /proc/net/tcp6 plumbing; registered via tcp_proc_register() below. */
static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};
2099
/* Register the /proc/net/tcp6 entry.  Returns 0 or a negative errno. */
int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}
2104
/* Remove the /proc/net/tcp6 entry. */
void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
2109#endif
2110
DEFINE_PROTO_INUSE(tcpv6)

/* The transport-protocol descriptor for AF_INET6/SOCK_STREAM sockets;
 * most callbacks are shared with IPv4 TCP, with v6-specific init,
 * destroy, hashing and backlog receive. */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	REF_PROTO_INUSE(tcpv6)
};
2149
/* inet6-layer handler for IPPROTO_TCP; registered in tcpv6_init(). */
static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2157
/* socket()-switch entry mapping AF_INET6/SOCK_STREAM to tcpv6_prot. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2168
/*
 * Boot-time registration of IPv6 TCP: the inet6 protocol handler, the
 * SOCK_STREAM protosw entry, and the tcp6 control socket.  Panics if
 * the control socket cannot be created.
 */
void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}
2180