1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#include <linux/module.h>
46#include <linux/init.h>
47#include <linux/kmod.h>
48#include <linux/slab.h>
49#include <linux/list.h>
50#include <linux/spinlock.h>
51#include <linux/rcupdate.h>
52#include <linux/uaccess.h>
53#include <linux/net.h>
54#include <linux/netdevice.h>
55#include <linux/socket.h>
56#include <linux/if_ether.h>
57#include <linux/if_arp.h>
58#include <linux/skbuff.h>
59#include <linux/can.h>
60#include <linux/can/core.h>
61#include <net/net_namespace.h>
62#include <net/sock.h>
63
64#include "af_can.h"
65
/* banner printed once at module load (__initdata: discarded after init) */
static __initdata const char banner[] = KERN_INFO
	"can: controller area network core (" CAN_VERSION_STRING ")\n";

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

/* module parameter: enable the periodic statistics timer (see can_init()) */
static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

/*
 * Per-device receive lists. can_rx_dev_list links one dev_rcv_lists per
 * registered CAN netdevice plus can_rx_alldev_list (dev == NULL) for
 * subscriptions matching any device. can_rcvlists_lock serializes writers;
 * readers traverse under RCU.
 * NOTE(review): can_rx_dev_list is deliberately non-static — presumably
 * referenced by the proc code via "af_can.h"; confirm.
 */
HLIST_HEAD(can_rx_dev_list);
static struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);

/* slab cache for struct receiver objects (see can_rx_register()) */
static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols, indexed by protocol number */
static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_SPINLOCK(proto_tab_lock);	/* protects proto_tab[] */

/* statistics state — NOTE(review): non-static, presumably shared with the
 * proc/stat code via "af_can.h"; confirm */
struct timer_list can_stattimer;	/* timer for statistics update */
struct s_stats can_stats;		/* packet statistics */
struct s_pstats can_pstats;		/* receive list statistics */
93
94
95
96
97
98static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
99{
100 struct sock *sk = sock->sk;
101
102 switch (cmd) {
103
104 case SIOCGSTAMP:
105 return sock_get_timestamp(sk, (struct timeval __user *)arg);
106
107 default:
108 return -ENOIOCTLCMD;
109 }
110}
111
/* sk_destruct callback: drop any frames still queued on the socket */
static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
116
117static int can_create(struct net *net, struct socket *sock, int protocol)
118{
119 struct sock *sk;
120 struct can_proto *cp;
121 int err = 0;
122
123 sock->state = SS_UNCONNECTED;
124
125 if (protocol < 0 || protocol >= CAN_NPROTO)
126 return -EINVAL;
127
128 if (net != &init_net)
129 return -EAFNOSUPPORT;
130
131#ifdef CONFIG_MODULES
132
133 if (!proto_tab[protocol]) {
134 err = request_module("can-proto-%d", protocol);
135
136
137
138
139
140
141 if (err && printk_ratelimit())
142 printk(KERN_ERR "can: request_module "
143 "(can-proto-%d) failed.\n", protocol);
144 }
145#endif
146
147 spin_lock(&proto_tab_lock);
148 cp = proto_tab[protocol];
149 if (cp && !try_module_get(cp->prot->owner))
150 cp = NULL;
151 spin_unlock(&proto_tab_lock);
152
153
154
155 if (!cp)
156 return -EPROTONOSUPPORT;
157
158 if (cp->type != sock->type) {
159 err = -EPROTONOSUPPORT;
160 goto errout;
161 }
162
163 if (cp->capability >= 0 && !capable(cp->capability)) {
164 err = -EPERM;
165 goto errout;
166 }
167
168 sock->ops = cp->ops;
169
170 sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
171 if (!sk) {
172 err = -ENOMEM;
173 goto errout;
174 }
175
176 sock_init_data(sock, sk);
177 sk->sk_destruct = can_sock_destruct;
178
179 if (sk->sk_prot->init)
180 err = sk->sk_prot->init(sk);
181
182 if (err) {
183
184 sock_orphan(sk);
185 sock_put(sk);
186 }
187
188 errout:
189 module_put(cp->prot->owner);
190 return err;
191}
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/**
 * can_send - transmit a CAN frame (with optional local loopback)
 * @skb:  socket buffer containing exactly one struct can_frame,
 *        with skb->dev set to the outgoing CAN interface
 * @loop: nonzero to loop the frame back to local listeners
 *
 * The skb is consumed in all cases (freed on error, handed to the
 * stack on success).
 *
 * Return: 0 on success, otherwise a negative error code:
 *  -EINVAL   skb does not hold a valid CAN frame
 *  -EPERM    skb->dev is not a CAN interface
 *  -ENETDOWN the interface is down
 *  -ENOMEM   cloning for the local loopback failed
 *  or the (negated) result of dev_queue_xmit() via net_xmit_errno()
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int err;

	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (skb->dev->type != ARPHRD_CAN) {
		kfree_skb(skb);
		return -EPERM;
	}

	if (!(skb->dev->flags & IFF_UP)) {
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->protocol = htons(ETH_P_CAN);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * If the interface cannot echo sent frames itself
		 * (no IFF_ECHO), perform the loopback here with a
		 * clone that is fed back via netif_rx() below.
		 */
		if (!(skb->dev->flags & IFF_ECHO)) {

			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			/* keep the originating sock so the sender can
			 * recognize (and skip) its own frames */
			newskb->sk = skb->sk;
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe when no
		 * loopback clone was made */
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx(newskb);

	/* update statistics */
	can_stats.tx_frames++;
	can_stats.tx_frames_delta++;

	return 0;
}
EXPORT_SYMBOL(can_send);
290
291
292
293
294
295static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
296{
297 struct dev_rcv_lists *d = NULL;
298 struct hlist_node *n;
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313 hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
314 if (d->dev == dev)
315 break;
316 }
317
318 return n ? d : NULL;
319}
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
/*
 * find_rcv_list - classify a (can_id, mask) filter into a receive list
 * @can_id: filter CAN identifier (may carry CAN_INV_FILTER); reduced
 *          in place to can_id & mask
 * @mask:   filter mask; may be reduced in place (see below)
 * @d:      receive-list set of the device to subscribe on
 *
 * Picks the cheapest list that can hold the filter:
 *  RX_ERR  - error frame subscriptions (mask had CAN_ERR_FLAG set)
 *  RX_INV  - inverted filters (can_id had CAN_INV_FILTER set)
 *  RX_ALL  - mask == 0, i.e. "receive everything"
 *  RX_EFF  - exact-match single EFF id
 *  rx_sff[]- per-id array slot for an exact-match single SFF id
 *  RX_FIL  - everything else (generic compare loop at receive time)
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* remember inverted bit */

	/* filter for error frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in the list entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we set EFF/RTR flags again */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
	    && !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
				/* RFC: a future use-case for hash tables? */
				return &d->rx[RX_EFF];
			}
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @dev:    CAN network device (NULL subscribes via can_rx_alldev_list;
 *          NOTE(review): presumed — confirm that find_dev_rcv_lists()
 *          matches the NULL-dev all-device entry)
 * @can_id: CAN identifier (see find_rcv_list() for filter semantics)
 * @mask:   CAN mask
 * @func:   callback invoked for each matching frame
 * @data:   opaque pointer passed to @func
 * @ident:  string for statistics/debug identification of the caller
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENODEV when
 * the device has no receive-list set (not a registered CAN device).
 */
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
		    void (*func)(struct sk_buff *, void *), void *data,
		    char *ident)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct dev_rcv_lists *d;
	int err = 0;

	/* allocate before taking the spinlock (GFP_KERNEL may sleep) */
	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (d) {
		/* may reduce can_id/mask and selects the target list */
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id = can_id;
		r->mask = mask;
		r->matches = 0;
		r->func = func;
		r->data = data;
		r->ident = ident;

		/* RCU publication: readers in can_rcv_filter() see the
		 * fully initialized entry */
		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		can_pstats.rcv_entries++;
		if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
			can_pstats.rcv_entries_max = can_pstats.rcv_entries;
	} else {
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
471
472
473
474
/* RCU callback: free a dev_rcv_lists after all readers are done with it */
static void can_rx_delete_device(struct rcu_head *rp)
{
	struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);

	kfree(d);
}
481
482
483
484
/* RCU callback: return a receiver to rcv_cache after the grace period */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *r = container_of(rp, struct receiver, rcu);

	kmem_cache_free(rcv_cache, r);
}
491
492
493
494
495
496
497
498
499
500
501
502
/**
 * can_rx_unregister - remove a subscription made with can_rx_register()
 * @dev:    CAN network device (NULL for the all-device list)
 * @can_id: CAN identifier of the subscription
 * @mask:   CAN mask of the subscription
 * @func:   callback of the subscription
 * @data:   opaque pointer of the subscription
 *
 * All four of can_id/mask/func/data must match the original
 * registration. The receiver (and possibly the whole device list, if
 * it was flagged remove_on_zero_entries by the netdev notifier) is
 * freed via RCU after concurrent readers have finished.
 */
void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
		       void (*func)(struct sk_buff *, void *), void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct hlist_node *next;
	struct dev_rcv_lists *d;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (!d) {
		printk(KERN_ERR "BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	/* applies the same can_id/mask reduction as at registration time,
	 * so we search the same list the entry was added to */
	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete. This must
	 * exist, since no receiver may be unregistered that was not
	 * registered first.
	 */
	hlist_for_each_entry_rcu(r, next, rl, list) {
		if (r->can_id == can_id && r->mask == mask
		    && r->func == func && r->data == data)
			break;
	}

	/*
	 * The hlist cursor 'next' is NULL only when the loop completed
	 * without a match; in that case 'r' points at the last entry
	 * (or is stale) and must not be freed.
	 */
	if (!next) {
		printk(KERN_ERR "BUG: receive list entry not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		r = NULL;
		d = NULL;
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats.rcv_entries > 0)
		can_pstats.rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries)
		hlist_del_rcu(&d->list);
	else
		d = NULL; /* keep the device list; do not free it below */

 out:
	spin_unlock(&can_rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (r)
		call_rcu(&r->rcu, can_rx_delete_receiver);

	/* schedule the device structure for deletion */
	if (d)
		call_rcu(&d->rcu, can_rx_delete_device);
}
EXPORT_SYMBOL(can_rx_unregister);
574
/* hand one frame to a subscriber's callback and count the match */
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
	r->func(skb, r->data);
	r->matches++;
}
580
/*
 * can_rcv_filter - deliver one received frame to all matching subscribers
 * @d:   receive-list set to search (per-device or the all-device set)
 * @skb: received CAN frame
 *
 * Walks the lists populated by find_rcv_list() under RCU (caller holds
 * the RCU read lock). Returns the number of deliveries made.
 */
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
	struct receiver *r;
	struct hlist_node *n;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (d->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error frame entries only */
		hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
			if (can_id & r->mask) {
				deliver(skb, r);
				matches++;
			}
		}
		/* error frames never match the normal filters below */
		return matches;
	}

	/* check for unfiltered entries (mask == 0) */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
		deliver(skb, r);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
		if ((can_id & r->mask) == r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
		if ((can_id & r->mask) != r->can_id) {
			deliver(skb, r);
			matches++;
		}
	}

	/* RTR frames are never held in the single-id fast lists */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		/* exact-match subscriptions for a single EFF id */
		hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
			if (r->can_id == can_id) {
				deliver(skb, r);
				matches++;
			}
		}
	} else {
		/* per-id array slot: every entry here matches by design */
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
			deliver(skb, r);
			matches++;
		}
	}

	return matches;
}
646
647static int can_rcv(struct sk_buff *skb, struct net_device *dev,
648 struct packet_type *pt, struct net_device *orig_dev)
649{
650 struct dev_rcv_lists *d;
651 struct can_frame *cf = (struct can_frame *)skb->data;
652 int matches;
653
654 if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) {
655 kfree_skb(skb);
656 return 0;
657 }
658
659 BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8);
660
661
662 can_stats.rx_frames++;
663 can_stats.rx_frames_delta++;
664
665 rcu_read_lock();
666
667
668 matches = can_rcv_filter(&can_rx_alldev_list, skb);
669
670
671 d = find_dev_rcv_lists(dev);
672 if (d)
673 matches += can_rcv_filter(d, skb);
674
675 rcu_read_unlock();
676
677
678 consume_skb(skb);
679
680 if (matches > 0) {
681 can_stats.matches++;
682 can_stats.matches_delta++;
683 }
684
685 return 0;
686}
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
/**
 * can_proto_register - register a CAN protocol in proto_tab[]
 * @cp: protocol descriptor (protocol number, socket ops, struct proto)
 *
 * Registers cp->prot with the socket core first, then publishes @cp
 * under proto_tab_lock. Fills in the default can_ioctl() handler when
 * the protocol supplies none.
 *
 * Return: 0 on success, -EINVAL for an out-of-range protocol number,
 * -EBUSY when the slot is taken, or the proto_register() error.
 */
int can_proto_register(struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		printk(KERN_ERR "can: protocol number %d out of range\n",
		       proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	spin_lock(&proto_tab_lock);
	if (proto_tab[proto]) {
		printk(KERN_ERR "can: protocol %d already registered\n",
		       proto);
		err = -EBUSY;
	} else {
		proto_tab[proto] = cp;

		/* use generic ioctl function if not defined by module */
		if (!cp->ops->ioctl)
			cp->ops->ioctl = can_ioctl;
	}
	spin_unlock(&proto_tab_lock);

	/* undo the proto_register() from above on failure (-EBUSY) */
	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);
737
738
739
740
741
/**
 * can_proto_unregister - remove a CAN protocol from proto_tab[]
 * @cp: protocol descriptor previously passed to can_proto_register()
 *
 * Unregistering a protocol that was never registered is a caller bug;
 * it is logged but the cleanup still proceeds.
 */
void can_proto_unregister(struct can_proto *cp)
{
	int proto = cp->protocol;

	spin_lock(&proto_tab_lock);
	if (!proto_tab[proto]) {
		printk(KERN_ERR "BUG: can: protocol %d is not registered\n",
		       proto);
	}
	proto_tab[proto] = NULL;
	spin_unlock(&proto_tab_lock);

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
757
758
759
760
/*
 * can_notifier - netdevice event handler
 *
 * Creates a dev_rcv_lists when a CAN device registers, and removes it
 * (immediately, or deferred until its last receiver is unregistered)
 * when the device unregisters. Non-CAN devices and foreign namespaces
 * are ignored.
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dev_rcv_lists *d;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device; an allocation
		 * failure is only logged — the device then simply has no
		 * receive lists (can_rx_register() returns -ENODEV) */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d) {
			printk(KERN_ERR
			       "can: allocation of receive list failed\n");
			return NOTIFY_DONE;
		}
		d->dev = dev;

		spin_lock(&can_rcvlists_lock);
		hlist_add_head_rcu(&d->list, &can_rx_dev_list);
		spin_unlock(&can_rcvlists_lock);

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&can_rcvlists_lock);

		d = find_dev_rcv_lists(dev);
		if (d) {
			if (d->entries) {
				/* receivers still subscribed: defer the
				 * removal to can_rx_unregister() */
				d->remove_on_zero_entries = 1;
				d = NULL;
			} else
				hlist_del_rcu(&d->list);
		} else
			printk(KERN_ERR "can: notifier: receive list not "
			       "found for dev %s\n", dev->name);

		spin_unlock(&can_rcvlists_lock);

		/* free only after the RCU grace period (readers may still
		 * traverse the list) */
		if (d)
			call_rcu(&d->rcu, can_rx_delete_device);

		break;
	}

	return NOTIFY_DONE;
}
824
825
826
827
828
/* packet handler for ETH_P_CAN frames from any device (dev == NULL) */
static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.dev = NULL,
	.func = can_rcv,
};

/* address family backend: dispatches socket() calls to can_create() */
static struct net_proto_family can_family_ops __read_mostly = {
	.family = PF_CAN,
	.create = can_create,
	.owner = THIS_MODULE,
};

/* notifier block for netdevice register/unregister events */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};
845
/*
 * can_init - module initialization
 *
 * Sets up the receiver slab cache, the all-device receive list, the
 * optional statistics timer and the proc entries, then hooks PF_CAN
 * into the socket layer, the netdevice notifier chain and the packet
 * type handlers.
 * NOTE(review): the return values of sock_register(),
 * register_netdevice_notifier() and can_init_proc() are not checked —
 * confirm whether failure handling is required here.
 */
static __init int can_init(void)
{
	printk(banner);

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	/* insert the entry for subscriptions matching any device
	 * (can_rx_alldev_list.dev stays NULL from its static init) */
	spin_lock(&can_rcvlists_lock);
	hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
	spin_unlock(&can_rcvlists_lock);

	if (stats_timer) {
		/* the statistics are updated every second (timer triggered) */
		setup_timer(&can_stattimer, can_stat_update, 0);
		mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
	} else
		can_stattimer.function = NULL;

	can_init_proc();

	/* protocol register */
	sock_register(&can_family_ops);
	register_netdevice_notifier(&can_netdev_notifier);
	dev_add_pack(&can_packet);

	return 0;
}
881
882static __exit void can_exit(void)
883{
884 struct dev_rcv_lists *d;
885 struct hlist_node *n, *next;
886
887 if (stats_timer)
888 del_timer(&can_stattimer);
889
890 can_remove_proc();
891
892
893 dev_remove_pack(&can_packet);
894 unregister_netdevice_notifier(&can_netdev_notifier);
895 sock_unregister(PF_CAN);
896
897
898 spin_lock(&can_rcvlists_lock);
899 hlist_del(&can_rx_alldev_list.list);
900 hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
901 hlist_del(&d->list);
902 kfree(d);
903 }
904 spin_unlock(&can_rcvlists_lock);
905
906 kmem_cache_destroy(rcv_cache);
907}
908
909module_init(can_init);
910module_exit(can_exit);
911