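/*
 * TIPC socket API
 *
 * Implements the AF_TIPC socket interface (SOCK_STREAM, SOCK_SEQPACKET,
 * SOCK_DGRAM and SOCK_RDM) on top of the generic TIPC port layer.
 */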
#include <linux/export.h>
#include <net/sock.h>

#include "core.h"
#include "port.h"

#define SS_LISTENING	-1	/* socket is listening */
#define SS_READY	-2	/* socket is connectionless */

#define OVERLOAD_LIMIT_BASE	5000
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout, in ms */

struct tipc_sock {
	struct sock sk;
	struct tipc_port *p;
	struct tipc_portid peer_name;
	unsigned int conn_timeout;
};

#define tipc_sk(sk) ((struct tipc_sock *)(sk))
#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))

#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
			(sock->state == SS_DISCONNECTING))

static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;

static struct proto tipc_proto;

static int sockets_enabled;

static atomic_t tipc_queue_size = ATOMIC_INIT(0);
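
/**
 * advance_rx_queue - discard first buffer in socket receive queue
 *
 * Caller must hold socket lock
 */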
static void advance_rx_queue(struct sock *sk)
{
	buf_discard(__skb_dequeue(&sk->sk_receive_queue));
	atomic_dec(&tipc_queue_size);
}
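
/**
 * discard_rx_queue - discard all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */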
static void discard_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		atomic_dec(&tipc_queue_size);
		buf_discard(buf);
	}
}
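
/**
 * reject_rx_queue - reject all buffers in socket receive queue
 *
 * Caller must hold socket lock
 */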
static void reject_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		atomic_dec(&tipc_queue_size);
	}
}
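
/**
 * tipc_create - create a TIPC socket
 * @net: network namespace (unused)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */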
static int tipc_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_port *tp_ptr;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	if (sk == NULL)
		return -ENOMEM;

	/* Allocate TIPC port for socket to use */
	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
				     TIPC_LOW_IMPORTANCE);
	if (unlikely(!tp_ptr)) {
		sk_free(sk);
		return -ENOMEM;
	}

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;

	sock_init_data(sock, sk);
	sk->sk_backlog_rcv = backlog_rcv;
	tipc_sk(sk)->p = tp_ptr;
	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;

	spin_unlock_bh(tp_ptr->lock);

	if (sock->state == SS_READY) {
		tipc_set_portunreturnable(tp_ptr->ref, 1);
		if (sock->type == SOCK_DGRAM)
			tipc_set_portunreliable(tp_ptr->ref, 1);
	}

	return 0;
}
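
/**
 * release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket,
 * rejecting those that the sender may still care about, then deletes the
 * underlying TIPC port.
 *
 * Returns 0 on success, errno otherwise
 */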
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		atomic_dec(&tipc_queue_size);
		if (TIPC_SKB_CB(buf)->handle != 0)
			buf_discard(buf);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */
	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */
	discard_rx_queue(sk);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	return res;
}
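
/**
 * bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * A name or name sequence is bound when scope is positive and withdrawn
 * when scope is negative; a zero-length address withdraws all names.
 *
 * Returns 0 on success, errno otherwise
 */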
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	u32 portref = tipc_sk_port(sock->sk)->ref;

	if (unlikely(!uaddr_len))
		return tipc_withdraw(portref, 0, NULL);

	if (uaddr_len < sizeof(struct sockaddr_tipc))
		return -EINVAL;
	if (addr->family != AF_TIPC)
		return -EAFNOSUPPORT;

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
		return -EAFNOSUPPORT;

	return (addr->scope > 0) ?
		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
}
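
/**
 * get_name - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 */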
static int get_name(struct socket *sock, struct sockaddr *uaddr,
		    int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsock = tipc_sk(sock->sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((sock->state != SS_CONNECTED) &&
		    ((peer != 2) || (sock->state != SS_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsock->peer_name.ref;
		addr->addr.id.node = tsock->peer_name.node;
	} else {
		addr->addr.id.ref = tsock->p->ref;
		addr->addr.id.node = tipc_own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return 0;
}
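
/**
 * poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Returns the pollmask: POLLOUT while the port is not congested (READY and
 * CONNECTED states), POLLIN/POLLRDNORM when the receive queue is non-empty,
 * and POLLIN/POLLRDNORM/POLLHUP once the socket is disconnecting.
 */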
static unsigned int poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	u32 mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	switch ((int)sock->state) {
	case SS_READY:
	case SS_CONNECTED:
		if (!tipc_sk_port(sk)->congested)
			mask |= POLLOUT;
		/* fall through */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}
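
/**
 * dest_name_check - verify user is permitted to send to specified port name
 * @dest: destination address
 * @m: descriptor for message to be sent
 *
 * Prevents restricted configuration commands from being issued by
 * unauthorized users.
 *
 * Returns 0 if permission is granted, otherwise errno
 */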
static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
{
	struct tipc_cfg_msg_hdr hdr;

	if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
		return 0;
	if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
		return 0;
	if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
		return -EACCES;

	if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
		return -EMSGSIZE;
	if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
		return -EFAULT;
	if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
		return -EACCES;

	return 0;
}
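
/**
 * send_msg - send message in connectionless manner
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @total_len: length of message
 *
 * Message must have a destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages, and for 'SYN' messages on
 * SOCK_SEQPACKET and SOCK_STREAM connections.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */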
static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	int needs_conn;
	long timeout_val;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;
	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
	    (m->msg_iovlen > (unsigned)INT_MAX))
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING) {
			res = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			res = -EISCONN;
			goto exit;
		}
		if ((tport->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0))) {
			res = -EOPNOTSUPP;
			goto exit;
		}
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tport->conn_type = dest->addr.name.name.type;
			tport->conn_instance = dest->addr.name.name.instance;
		}

		/* Abort any pending connection attempts (very unlikely) */
		reject_rx_queue(sk);
	}

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_send2name(tport->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tport->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_MCAST) {
			if (needs_conn) {
				res = -EOPNOTSUPP;
				break;
			}
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_multicast(tport->ref,
					     &dest->addr.nameseq,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		}
		if (likely(res != -ELINKCONG)) {
			if (needs_conn && (res >= 0))
				sock->state = SS_CONNECTING;
			break;
		}
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
				!tport->congested, timeout_val);
		lock_sock(sk);
	} while (1);

exit:
	if (iocb)
		release_sock(sk);
	return res;
}
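
/**
 * send_packet - send a connection-oriented message
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @total_len: length of message
 *
 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */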
static int send_packet(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	long timeout_val;
	int res;

	/* Handle implied connection establishment */
	if (unlikely(dest))
		return send_msg(iocb, sock, m, total_len);

	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
	    (m->msg_iovlen > (unsigned)INT_MAX))
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	do {
		if (unlikely(sock->state != SS_CONNECTED)) {
			if (sock->state == SS_DISCONNECTING)
				res = -EPIPE;
			else
				res = -ENOTCONN;
			break;
		}

		res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
				total_len);
		if (likely(res != -ELINKCONG))
			break;
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
			(!tport->congested || !tport->connected), timeout_val);
		lock_sock(sk);
	} while (1);

	if (iocb)
		release_sock(sk);
	return res;
}
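
/**
 * send_stream - send stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: data to send
 * @total_len: total length of data to be sent
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */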
static int send_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct msghdr my_msg;
	struct iovec my_iov;
	struct iovec *curr_iov;
	int curr_iovlen;
	char __user *curr_start;
	u32 hdr_size;
	int curr_left;
	int bytes_to_send;
	int bytes_sent;
	int res;

	lock_sock(sk);

	/* Handle special cases where there is no connection */
	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_UNCONNECTED) {
			res = send_packet(NULL, sock, m, total_len);
			goto exit;
		} else if (sock->state == SS_DISCONNECTING) {
			res = -EPIPE;
			goto exit;
		} else {
			res = -ENOTCONN;
			goto exit;
		}
	}

	if (unlikely(m->msg_name)) {
		res = -EISCONN;
		goto exit;
	}

	if ((total_len > (unsigned)INT_MAX) ||
	    (m->msg_iovlen > (unsigned)INT_MAX)) {
		res = -EMSGSIZE;
		goto exit;
	}

	/*
	 * Send each iovec entry using one or more messages
	 *
	 * Note: This algorithm is good for the most likely case
	 * (i.e. one large iovec entry), but could be improved to pass sets
	 * of small iovec entries into send_packet().
	 */
	curr_iov = m->msg_iov;
	curr_iovlen = m->msg_iovlen;
	my_msg.msg_iov = &my_iov;
	my_msg.msg_iovlen = 1;
	my_msg.msg_flags = m->msg_flags;
	my_msg.msg_name = NULL;
	bytes_sent = 0;

	hdr_size = msg_hdr_sz(&tport->phdr);

	while (curr_iovlen--) {
		curr_start = curr_iov->iov_base;
		curr_left = curr_iov->iov_len;

		while (curr_left) {
			bytes_to_send = tport->max_pkt - hdr_size;
			if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
				bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
			if (curr_left < bytes_to_send)
				bytes_to_send = curr_left;
			my_iov.iov_base = curr_start;
			my_iov.iov_len = bytes_to_send;
			res = send_packet(NULL, sock, &my_msg, bytes_to_send);
			if (res < 0) {
				if (bytes_sent)
					res = bytes_sent;
				goto exit;
			}
			curr_left -= bytes_to_send;
			curr_start += bytes_to_send;
			bytes_sent += bytes_to_send;
		}

		curr_iov++;
	}
	res = bytes_sent;
exit:
	release_sock(sk);
	return res;
}
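
/**
 * auto_connect - complete connection setup to a remote port
 * @sock: socket structure
 * @msg: peer's response message
 *
 * Returns 0 on success, errno otherwise
 */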
static int auto_connect(struct socket *sock, struct tipc_msg *msg)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);

	if (msg_errcode(msg)) {
		sock->state = SS_DISCONNECTING;
		return -ECONNREFUSED;
	}

	tsock->peer_name.ref = msg_origport(msg);
	tsock->peer_name.node = msg_orignode(msg);
	tipc_connect2port(tsock->p->ref, &tsock->peer_name);
	tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
	sock->state = SS_CONNECTED;
	return 0;
}
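
/**
 * set_orig_addr - capture sender's address for received message
 * @m: descriptor for message info
 * @msg: received message header
 *
 * Note: Address is not captured if not requested by receiver.
 */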
static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;

	if (addr) {
		addr->family = AF_TIPC;
		addr->addrtype = TIPC_ADDR_ID;
		addr->addr.id.ref = msg_origport(msg);
		addr->addr.id.node = msg_orignode(msg);
		addr->addr.name.domain = 0;
		addr->scope = 0;
		m->msg_namelen = sizeof(struct sockaddr_tipc);
	}
}
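
/**
 * anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tport: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */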
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
			 struct tipc_port *tport)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination object */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tport->conn_type != 0);
		anc_data[0] = tport->conn_type;
		anc_data[1] = tport->conn_instance;
		anc_data[2] = tport->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
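
/**
 * recv_msg - receive packet-oriented message
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */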
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */
	if (unlikely(sock->state == SS_CONNECTING)) {
		res = auto_connect(sock, msg);
		if (res)
			goto exit;
	}

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = anc_data_recv(m, msg, tport);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
					      m->msg_iov, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if ((sock->state != SS_READY) &&
		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
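
/**
 * recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */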
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely((sock->state == SS_UNCONNECTED) ||
		     (sock->state == SS_CONNECTING))) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = anc_data_recv(m, msg, tport);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
					      m->msg_iov, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
					(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	     (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
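
/**
 * rx_queue_full - determine if receive queue can accept another message
 * @msg: message to be added to queue
 * @queue_size: current size of queue
 * @base: nominal maximum size of queue
 *
 * Returns 1 if queue is unable to accept message, otherwise 0
 */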
static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
{
	u32 threshold;
	u32 imp = msg_importance(msg);

	if (imp == TIPC_LOW_IMPORTANCE)
		threshold = base;
	else if (imp == TIPC_MEDIUM_IMPORTANCE)
		threshold = base * 2;
	else if (imp == TIPC_HIGH_IMPORTANCE)
		threshold = base * 100;
	else
		return 0;

	if (msg_connected(msg))
		threshold *= 4;

	return queue_size >= threshold;
}
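
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */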
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	u32 recv_q_len;

	/* Reject message if it is wrong sort of message for socket */
	if (sock->state == SS_READY) {
		if (msg_connected(msg))
			return TIPC_ERR_NO_PORT;
	} else {
		if (msg_mcast(msg))
			return TIPC_ERR_NO_PORT;
		if (sock->state == SS_CONNECTED) {
			if (!msg_connected(msg))
				return TIPC_ERR_NO_PORT;
		} else if (sock->state == SS_CONNECTING) {
			if (!msg_connected(msg) && (msg_errcode(msg) == 0))
				return TIPC_ERR_NO_PORT;
		} else if (sock->state == SS_LISTENING) {
			if (msg_connected(msg) || msg_errcode(msg))
				return TIPC_ERR_NO_PORT;
		} else if (sock->state == SS_DISCONNECTING) {
			return TIPC_ERR_NO_PORT;
		} else {
			if (msg_connected(msg) || msg_errcode(msg))
				return TIPC_ERR_NO_PORT;
		}
	}

	/* Reject message if there isn't room to queue it */
	recv_q_len = (u32)atomic_read(&tipc_queue_size);
	if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
			return TIPC_ERR_OVERLOAD;
	}
	recv_q_len = skb_queue_len(&sk->sk_receive_queue);
	if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
			return TIPC_ERR_OVERLOAD;
	}

	/* Enqueue message (finally!) */
	TIPC_SKB_CB(buf)->handle = 0;
	atomic_inc(&tipc_queue_size);
	__skb_queue_tail(&sk->sk_receive_queue, buf);

	/* Initiate connection termination for an incoming 'FIN' */
	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		tipc_disconnect_port(tipc_sk_port(sk));
	}

	if (waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
	return TIPC_OK;
}
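
/**
 * backlog_rcv - handle incoming message from backlog queue
 * @sk: socket
 * @buf: message
 *
 * Caller must hold socket lock, but not port lock.
 *
 * Returns 0
 */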
static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
{
	u32 res;

	res = filter_rcv(sk, buf);
	if (res)
		tipc_reject_msg(buf, res);
	return 0;
}
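
/**
 * dispatch - handle incoming message
 * @tport: TIPC port that received message
 * @buf: message
 *
 * Called with port lock already taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */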
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct sock *sk = (struct sock *)tport->usr_handle;
	u32 res;

	/*
	 * Process message if socket is unlocked; otherwise add to backlog queue
	 *
	 * This code is based on sk_receive_skb(), but must be distinct from it
	 * since a TIPC-specific filter/reject mechanism is utilized
	 */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		res = filter_rcv(sk, buf);
	} else {
		if (sk_add_backlog(sk, buf))
			res = TIPC_ERR_OVERLOAD;
		else
			res = TIPC_OK;
	}
	bh_unlock_sock(sk);

	return res;
}
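
/**
 * wakeupdispatch - wake up port after congestion
 * @tport: port to wakeup
 *
 * Called with port lock already taken.
 */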
static void wakeupdispatch(struct tipc_port *tport)
{
	struct sock *sk = (struct sock *)tport->usr_handle;

	if (waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
}
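
/**
 * connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */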
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
		   int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int timeout;
	int res;

	lock_sock(sk);

	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
	if (sock->state == SS_READY) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/* For now, TIPC does not support a non-blocking form of connect() */
	if (flags & O_NONBLOCK) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/* Issue Posix-compliant error code if socket is in the wrong state */
	if (sock->state == SS_LISTENING) {
		res = -EOPNOTSUPP;
		goto exit;
	}
	if (sock->state == SS_CONNECTING) {
		res = -EALREADY;
		goto exit;
	}
	if (sock->state != SS_UNCONNECTED) {
		res = -EISCONN;
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	/* Reject any messages already in receive queue (very unlikely) */
	reject_rx_queue(sk);

	/* Send a 'SYN-' to destination */
	m.msg_name = dest;
	m.msg_namelen = destlen;
	res = send_msg(NULL, sock, &m, 0);
	if (res < 0)
		goto exit;

	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
	timeout = tipc_sk(sk)->conn_timeout;
	release_sock(sk);
	res = wait_event_interruptible_timeout(*sk_sleep(sk),
			(!skb_queue_empty(&sk->sk_receive_queue) ||
			(sock->state != SS_CONNECTING)),
			timeout ? (long)msecs_to_jiffies(timeout)
				: MAX_SCHEDULE_TIMEOUT);
	lock_sock(sk);

	if (res > 0) {
		buf = skb_peek(&sk->sk_receive_queue);
		if (buf != NULL) {
			msg = buf_msg(buf);
			res = auto_connect(sock, msg);
			if (!res) {
				if (!msg_data_sz(msg))
					advance_rx_queue(sk);
			}
		} else {
			if (sock->state == SS_CONNECTED)
				res = -EISCONN;
			else
				res = -ECONNREFUSED;
		}
	} else {
		if (res == 0)
			res = -ETIMEDOUT;
		else
			; /* leave "res" unchanged */
		sock->state = SS_DISCONNECTING;
	}

exit:
	release_sock(sk);
	return res;
}
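
/**
 * listen - allow socket to listen for incoming connections
 * @sock: socket structure
 * @len: (unused)
 *
 * Returns 0 on success, errno otherwise
 */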
static int listen(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;
	int res;

	lock_sock(sk);

	if (sock->state != SS_UNCONNECTED)
		res = -EINVAL;
	else {
		sock->state = SS_LISTENING;
		res = 0;
	}

	release_sock(sk);
	return res;
}
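
/**
 * accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */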
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *buf;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (flags & O_NONBLOCK) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		res = wait_event_interruptible(*sk_sleep(sk),
				(!skb_queue_empty(&sk->sk_receive_queue)));
		lock_sock(sk);
		if (res)
			goto exit;
	}

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
	if (!res) {
		struct sock *new_sk = new_sock->sk;
		struct tipc_sock *new_tsock = tipc_sk(new_sk);
		struct tipc_port *new_tport = new_tsock->p;
		u32 new_ref = new_tport->ref;
		struct tipc_msg *msg = buf_msg(buf);

		lock_sock(new_sk);

		/*
		 * Reject any stray messages received by new socket
		 * before the socket lock was taken (very, very unlikely)
		 */
		reject_rx_queue(new_sk);

		/* Connect new socket to its peer */
		new_tsock->peer_name.ref = msg_origport(msg);
		new_tsock->peer_name.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &new_tsock->peer_name);
		new_sock->state = SS_CONNECTED;

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tport->conn_type = msg_nametype(msg);
			new_tport->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
		 * Respond to 'SYN+' by queuing it on new socket.
		 */
		if (!msg_data_sz(msg)) {
			struct msghdr m = {NULL,};

			advance_rx_queue(sk);
			send_packet(NULL, new_sock, &m, 0);
		} else {
			__skb_dequeue(&sk->sk_receive_queue);
			__skb_queue_head(&new_sk->sk_receive_queue, buf);
		}
		release_sock(new_sk);
	}
exit:
	release_sock(sk);
	return res;
}
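
/**
 * shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */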
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != 0) {
				buf_discard(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages; wake up sleeping tasks */
		discard_rx_queue(sk);
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
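
/**
 * setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */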
static int setsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tipc_set_portimportance(tport->ref, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			res = tipc_set_portunreliable(tport->ref, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		res = tipc_set_portunreturnable(tport->ref, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}
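
/**
 * getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns a zero-length result for all IPPROTO_TCP
 * options (to ease compatibility).
 *
 * Returns 0 on success, errno otherwise
 */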
static int getsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tipc_portimportance(tport->ref, &value);
		break;
	case TIPC_SRC_DROPPABLE:
		res = tipc_portunreliable(tport->ref, &value);
		break;
	case TIPC_DEST_DROPPABLE:
		res = tipc_portunreturnable(tport->ref, &value);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tipc_sk(sk)->conn_timeout;
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = (u32)atomic_read(&tipc_queue_size);
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
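
/* Protocol switches for the various socket types */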
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_msg,
	.recvmsg	= recv_msg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_packet,
	.recvmsg	= recv_msg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_stream,
	.recvmsg	= recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};

static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_create
};

static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock)
};
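
/**
 * tipc_socket_init - initialize TIPC socket interface
 *
 * Returns 0 on success, errno otherwise
 */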
int tipc_socket_init(void)
{
	int res;

	res = proto_register(&tipc_proto, 1);
	if (res) {
		err("Failed to register TIPC protocol type\n");
		goto out;
	}

	res = sock_register(&tipc_family_ops);
	if (res) {
		err("Failed to register TIPC socket type\n");
		proto_unregister(&tipc_proto);
		goto out;
	}

	sockets_enabled = 1;
 out:
	return res;
}
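
/**
 * tipc_socket_stop - stop TIPC socket interface
 */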
void tipc_socket_stop(void)
{
	if (!sockets_enabled)
		return;

	sockets_enabled = 0;
	sock_unregister(tipc_family_ops.family);
	proto_unregister(&tipc_proto);
}