/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

static __initdata const char banner[] = KERN_INFO
        "can: controller area network core (" CAN_VERSION_STRING ")\n";

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
              "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
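
/*
 * Illustration (not part of the original source): with this file built
 * into can.ko, the statistics timer can be disabled at load time, e.g.
 *
 *	modprobe can stats_timer=0
 *
 * Since the parameter is S_IRUGO, the value is also readable at
 * /sys/module/can/parameters/stats_timer.
 */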

/* receive lists: one entry per CAN netdevice, plus the 'all devices' entry */
HLIST_HEAD(can_rx_dev_list);
static struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_SPINLOCK(proto_tab_lock);

struct timer_list can_stattimer;   /* timer for statistics update */
struct s_stats  can_stats;         /* packet statistics */
struct s_pstats can_pstats;        /* receive list statistics */

/*
 * af_can socket functions
 */

static int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;

        switch (cmd) {

        case SIOCGSTAMP:
                return sock_get_timestamp(sk, (struct timeval __user *)arg);

        default:
                return -ENOIOCTLCMD;
        }
}

static void can_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
}

static int can_create(struct net *net, struct socket *sock, int protocol)
{
        struct sock *sk;
        struct can_proto *cp;
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (protocol < 0 || protocol >= CAN_NPROTO)
                return -EINVAL;

        if (net != &init_net)
                return -EAFNOSUPPORT;

#ifdef CONFIG_KMOD
        /* try to load protocol module if the kernel is modular */
        if (!proto_tab[protocol]) {
                err = request_module("can-proto-%d", protocol);

                /*
                 * In case of error we only print a message but don't
                 * return the error code immediately. Below we will
                 * return -EPROTONOSUPPORT
                 */
                if (err && printk_ratelimit())
                        printk(KERN_ERR "can: request_module "
                               "(can-proto-%d) failed.\n", protocol);
        }
#endif

        spin_lock(&proto_tab_lock);
        cp = proto_tab[protocol];
        if (cp && !try_module_get(cp->prot->owner))
                cp = NULL;
        spin_unlock(&proto_tab_lock);

        /* check for available protocol and correct usage */

        if (!cp)
                return -EPROTONOSUPPORT;

        if (cp->type != sock->type) {
                err = -EPROTONOSUPPORT;
                goto errout;
        }

        if (cp->capability >= 0 && !capable(cp->capability)) {
                err = -EPERM;
                goto errout;
        }

        sock->ops = cp->ops;

        sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
        if (!sk) {
                err = -ENOMEM;
                goto errout;
        }

        sock_init_data(sock, sk);
        sk->sk_destruct = can_sock_destruct;

        if (sk->sk_prot->init)
                err = sk->sk_prot->init(sk);

        if (err) {
                /* release sk on errors */
                sock_orphan(sk);
                sock_put(sk);
        }

 errout:
        module_put(cp->prot->owner);
        return err;
}

/**
 * can_send - transmit a CAN frame (optional with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
        struct sk_buff *newskb = NULL;
        struct can_frame *cf = (struct can_frame *)skb->data;
        int err;

        if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
                kfree_skb(skb);
                return -EINVAL;
        }

        if (skb->dev->type != ARPHRD_CAN) {
                kfree_skb(skb);
                return -EPERM;
        }

        if (!(skb->dev->flags & IFF_UP)) {
                kfree_skb(skb);
                return -ENETDOWN;
        }

        skb->protocol = htons(ETH_P_CAN);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        if (loop) {
                /* local loopback of sent CAN frames */

                /* indication for the CAN driver: do loopback */
                skb->pkt_type = PACKET_LOOPBACK;

                /*
                 * The reference to the originating sock may be required
                 * by the receiving socket to check whether the frame is
                 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
                 * Therefore we have to ensure that skb->sk remains the
                 * reference to the originating sock by restoring skb->sk
                 * after each skb_clone() or skb_orphan() usage.
                 */

                if (!(skb->dev->flags & IFF_ECHO)) {
                        /*
                         * If the interface is not capable to do loopback
                         * itself, we do it here.
                         */
                        newskb = skb_clone(skb, GFP_ATOMIC);
                        if (!newskb) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }

                        newskb->sk = skb->sk;
                        newskb->ip_summed = CHECKSUM_UNNECESSARY;
                        newskb->pkt_type = PACKET_BROADCAST;
                }
        } else {
                /* indication for the CAN driver: no loopback required */
                skb->pkt_type = PACKET_HOST;
        }

        /* send to netdevice */
        err = dev_queue_xmit(skb);
        if (err > 0)
                err = net_xmit_errno(err);

        if (err) {
                if (newskb)
                        kfree_skb(newskb);
                return err;
        }

        if (newskb)
                netif_rx(newskb);

        /* update statistics */
        can_stats.tx_frames++;
        can_stats.tx_frames_delta++;

        return 0;
}
EXPORT_SYMBOL(can_send);
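
/*
 * Usage sketch for can_send() (illustrative, not part of the original
 * file; the device name "can0" and the calling context are assumptions):
 *
 *	struct can_frame *cf;
 *	struct sk_buff *skb = alloc_skb(sizeof(*cf), GFP_KERNEL);
 *	struct net_device *dev = dev_get_by_name(&init_net, "can0");
 *
 *	if (!skb || !dev)
 *		goto out;			// error handling elided
 *	cf = (struct can_frame *)skb_put(skb, sizeof(*cf));
 *	memset(cf, 0, sizeof(*cf));
 *	cf->can_id  = 0x123;			// standard frame id
 *	cf->can_dlc = 2;
 *	cf->data[0] = 0xde;
 *	cf->data[1] = 0xad;
 *	skb->dev = dev;
 *	err = can_send(skb, 1);			// with local loopback
 *	dev_put(dev);
 *
 * Note that can_send() consumes the skb on both success and failure.
 */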

/*
 * af_can rx path
 */

static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
        struct dev_rcv_lists *d = NULL;
        struct hlist_node *n;

        /*
         * find receive list for this device
         *
         * The hlist_for_each_entry*() macros iterate using the cursor
         * variable n and set d to the struct containing each list node.
         *
         * If the list is empty or the search is unsuccessful, n remains
         * NULL after the loop, so the return value is NULL as well.
         * (The 'all devices' entry has d->dev == NULL and is found by
         * passing dev == NULL.)
         */
        hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
                if (d->dev == dev)
                        break;
        }

        return n ? d : NULL;
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
 *  there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Subtracted CAN_INV_FILTER flag from can_id inside the pointer.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
                                        struct dev_rcv_lists *d)
{
        canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

        /* filter for error frames in extra filterlist */
        if (*mask & CAN_ERR_FLAG) {
                /* clear CAN_ERR_FLAG in filter entry */
                *mask &= CAN_ERR_MASK;
                return &d->rx[RX_ERR];
        }

        /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

        /* ensure valid values in can_mask for 'SFF only' frame filtering */
        if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
                *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

        /* reduce condition testing at receive time */
        *can_id &= *mask;

        /* inverse can_id/can_mask filter */
        if (inv)
                return &d->rx[RX_INV];

        /* mask == 0 => no condition testing at receive time */
        if (!(*mask))
                return &d->rx[RX_ALL];

        /* extra filterlists for the subscription of a single non-RTR can_id */
        if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
            && !(*can_id & CAN_RTR_FLAG)) {

                if (*can_id & CAN_EFF_FLAG) {
                        if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
                                /* RFC: a future use-case for hash-tables? */
                                return &d->rx[RX_EFF];
                        }
                } else {
                        if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_sff[*can_id];
                }
        }

        /* default: filter via can_id/can_mask */
        return &d->rx[RX_FIL];
}
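
/*
 * Worked examples for the classification above (illustrative only):
 *
 *   can_id = 0x123, mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG
 *	-> &d->rx_sff[0x123]  (single SFF id, O(1) array lookup at rx time)
 *
 *   can_id = 0x123 | CAN_INV_FILTER, mask = CAN_SFF_MASK
 *	-> &d->rx[RX_INV]     (inverted match)
 *
 *   can_id = 0, mask = 0
 *	-> &d->rx[RX_ALL]     (matches every frame, no test at rx time)
 *
 *   can_id = 0, mask = CAN_ERR_FLAG | CAN_ERR_MASK
 *	-> &d->rx[RX_ERR]     (error frame subscription)
 */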

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask).
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *), void *data,
                    char *ident)
{
        struct receiver *r;
        struct hlist_head *rl;
        struct dev_rcv_lists *d;
        int err = 0;

        /* insert new receiver  (dev,canid,mask) -> (func,data) */

        r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&can_rcvlists_lock);

        d = find_dev_rcv_lists(dev);
        if (d) {
                rl = find_rcv_list(&can_id, &mask, d);

                r->can_id  = can_id;
                r->mask    = mask;
                r->matches = 0;
                r->func    = func;
                r->data    = data;
                r->ident   = ident;

                hlist_add_head_rcu(&r->list, rl);
                d->entries++;

                can_pstats.rcv_entries++;
                if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
                        can_pstats.rcv_entries_max = can_pstats.rcv_entries;
        } else {
                kmem_cache_free(rcv_cache, r);
                err = -ENODEV;
        }

        spin_unlock(&can_rcvlists_lock);

        return err;
}
EXPORT_SYMBOL(can_rx_register);
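
/*
 * Usage sketch for can_rx_register() (illustrative, not part of the
 * original file; my_rx_handler, priv and dev are hypothetical names):
 *
 *	static void my_rx_handler(struct sk_buff *skb, void *data)
 *	{
 *		struct my_priv *priv = data;
 *		struct can_frame *cf = (struct can_frame *)skb->data;
 *
 *		// ... evaluate cf->can_id / cf->data ...
 *		kfree_skb(skb);		// the handler owns the clone
 *	}
 *
 *	err = can_rx_register(dev, 0x123, CAN_SFF_MASK,
 *			      my_rx_handler, priv, "my_proto");
 *
 * The matching can_rx_unregister() call must later pass the identical
 * dev/can_id/mask/func/data tuple so the entry can be found and removed.
 */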

/*
 * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
 */
static void can_rx_delete_device(struct rcu_head *rp)
{
        struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);

        kfree(d);
}

/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
        struct receiver *r = container_of(rp, struct receiver, rcu);

        kmem_cache_free(rcv_cache, r);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
                       void (*func)(struct sk_buff *, void *), void *data)
{
        struct receiver *r = NULL;
        struct hlist_head *rl;
        struct hlist_node *next;
        struct dev_rcv_lists *d;

        spin_lock(&can_rcvlists_lock);

        d = find_dev_rcv_lists(dev);
        if (!d) {
                printk(KERN_ERR "BUG: receive list not found for "
                       "dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
                goto out;
        }

        rl = find_rcv_list(&can_id, &mask, d);

        /*
         * Search the receiver list for the item to delete. This item
         * must exist, since no receiver may be unregistered that has
         * not been registered before.
         */
        hlist_for_each_entry_rcu(r, next, rl, list) {
                if (r->can_id == can_id && r->mask == mask
                    && r->func == func && r->data == data)
                        break;
        }

        /*
         * Check for bugs in CAN protocol implementations:
         * If no matching list item was found, the list cursor variable next
         * will be NULL, while r will point to the last item of the list.
         */
        if (!next) {
                printk(KERN_ERR "BUG: receive list entry not found for "
                       "dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
                r = NULL;
                d = NULL;
                goto out;
        }

        hlist_del_rcu(&r->list);
        d->entries--;

        if (can_pstats.rcv_entries > 0)
                can_pstats.rcv_entries--;

        /* remove device structure requested by NETDEV_UNREGISTER */
        if (d->remove_on_zero_entries && !d->entries)
                hlist_del_rcu(&d->list);
        else
                d = NULL;

 out:
        spin_unlock(&can_rcvlists_lock);

        /* schedule the receiver item for deletion */
        if (r)
                call_rcu(&r->rcu, can_rx_delete_receiver);

        /* schedule the device structure for deletion */
        if (d)
                call_rcu(&d->rcu, can_rx_delete_device);
}
EXPORT_SYMBOL(can_rx_unregister);

static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

        if (clone) {
                clone->sk = skb->sk;
                r->func(clone, r->data);
                r->matches++;
        }
}

static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
        struct receiver *r;
        struct hlist_node *n;
        int matches = 0;
        struct can_frame *cf = (struct can_frame *)skb->data;
        canid_t can_id = cf->can_id;

        if (d->entries == 0)
                return 0;

        if (can_id & CAN_ERR_FLAG) {
                /* check for error frame entries only */
                hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
                        if (can_id & r->mask) {
                                deliver(skb, r);
                                matches++;
                        }
                }
                return matches;
        }

        /* check for unfiltered entries */
        hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
                deliver(skb, r);
                matches++;
        }

        /* check for can_id/mask entries */
        hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
                if ((can_id & r->mask) == r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check for inverted can_id/mask entries */
        hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
                if ((can_id & r->mask) != r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check filterlists for single non-RTR can_ids */
        if (can_id & CAN_RTR_FLAG)
                return matches;

        if (can_id & CAN_EFF_FLAG) {
                hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
                        }
                }
        } else {
                can_id &= CAN_SFF_MASK;
                hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
                        deliver(skb, r);
                        matches++;
                }
        }

        return matches;
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
{
        struct dev_rcv_lists *d;
        struct can_frame *cf = (struct can_frame *)skb->data;
        int matches;

        if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) {
                kfree_skb(skb);
                return 0;
        }

        BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8);

        /* update statistics */
        can_stats.rx_frames++;
        can_stats.rx_frames_delta++;

        rcu_read_lock();

        /* deliver the packet to sockets listening on all devices */
        matches = can_rcv_filter(&can_rx_alldev_list, skb);

        /* find receive list for this device */
        d = find_dev_rcv_lists(dev);
        if (d)
                matches += can_rcv_filter(d, skb);

        rcu_read_unlock();

        /* free the skbuff allocated by the netdevice driver */
        kfree_skb(skb);

        if (matches > 0) {
                can_stats.matches++;
                can_stats.matches_delta++;
        }

        return 0;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUF if proto_register() fails
 */
int can_proto_register(struct can_proto *cp)
{
        int proto = cp->protocol;
        int err = 0;

        if (proto < 0 || proto >= CAN_NPROTO) {
                printk(KERN_ERR "can: protocol number %d out of range\n",
                       proto);
                return -EINVAL;
        }

        err = proto_register(cp->prot, 0);
        if (err < 0)
                return err;

        spin_lock(&proto_tab_lock);
        if (proto_tab[proto]) {
                printk(KERN_ERR "can: protocol %d already registered\n",
                       proto);
                err = -EBUSY;
        } else {
                proto_tab[proto] = cp;

                /* use generic ioctl function if not defined by module */
                if (!cp->ops->ioctl)
                        cp->ops->ioctl = can_ioctl;
        }
        spin_unlock(&proto_tab_lock);

        if (err < 0)
                proto_unregister(cp->prot);

        return err;
}
EXPORT_SYMBOL(can_proto_register);
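
/*
 * Registration sketch for a CAN transport protocol module (illustrative;
 * my_proto_ops, my_proto and CAN_MYPROTO are hypothetical names -- real
 * users are e.g. the can-raw and can-bcm modules):
 *
 *	static struct proto my_proto __read_mostly = {
 *		.name     = "MY_PROTO",
 *		.owner    = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	static struct can_proto my_can_proto __read_mostly = {
 *		.type       = SOCK_DGRAM,
 *		.protocol   = CAN_MYPROTO,
 *		.capability = -1,		// no capability required
 *		.ops        = &my_proto_ops,	// struct proto_ops
 *		.prot       = &my_proto,
 *	};
 *
 *	err = can_proto_register(&my_can_proto);
 *
 * On-demand loading works via the "can-proto-%d" request in can_create();
 * protocol modules therefore declare a matching module alias, e.g.
 * MODULE_ALIAS("can-proto-1") in can-raw.
 */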

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(struct can_proto *cp)
{
        int proto = cp->protocol;

        spin_lock(&proto_tab_lock);
        if (!proto_tab[proto]) {
                printk(KERN_ERR "BUG: can: protocol %d is not registered\n",
                       proto);
        }
        proto_tab[proto] = NULL;
        spin_unlock(&proto_tab_lock);

        proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);

/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
                        void *data)
{
        struct net_device *dev = (struct net_device *)data;
        struct dev_rcv_lists *d;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;

        switch (msg) {

        case NETDEV_REGISTER:

                /*
                 * create new dev_rcv_lists for this device
                 *
                 * N.B. zeroing the struct is the correct initialization
                 * for the embedded hlist_head structs.
                 * Another list type, e.g. list_head, would require
                 * explicit initialization.
                 */
                d = kzalloc(sizeof(*d), GFP_KERNEL);
                if (!d) {
                        printk(KERN_ERR
                               "can: allocation of receive list failed\n");
                        return NOTIFY_DONE;
                }
                d->dev = dev;

                spin_lock(&can_rcvlists_lock);
                hlist_add_head_rcu(&d->list, &can_rx_dev_list);
                spin_unlock(&can_rcvlists_lock);

                break;

        case NETDEV_UNREGISTER:
                spin_lock(&can_rcvlists_lock);

                d = find_dev_rcv_lists(dev);
                if (d) {
                        if (d->entries) {
                                /* defer removal until last entry is gone */
                                d->remove_on_zero_entries = 1;
                                d = NULL;
                        } else
                                hlist_del_rcu(&d->list);
                } else
                        printk(KERN_ERR "can: notifier: receive list not "
                               "found for dev %s\n", dev->name);

                spin_unlock(&can_rcvlists_lock);

                if (d)
                        call_rcu(&d->rcu, can_rx_delete_device);

                break;
        }

        return NOTIFY_DONE;
}

/*
 * af_can module init/exit functions
 */

static struct packet_type can_packet __read_mostly = {
        .type = __constant_htons(ETH_P_CAN),
        .dev  = NULL,
        .func = can_rcv,
};

static struct net_proto_family can_family_ops __read_mostly = {
        .family = PF_CAN,
        .create = can_create,
        .owner  = THIS_MODULE,
};

/* notifier block for netdevice events */
static struct notifier_block can_netdev_notifier __read_mostly = {
        .notifier_call = can_notifier,
};

static __init int can_init(void)
{
        printk(banner);

        rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
                                      0, 0, NULL);
        if (!rcv_cache)
                return -ENOMEM;

        /*
         * Insert can_rx_alldev_list for reception on all devices.
         * This struct is zero initialized, which is correct for the
         * embedded hlist heads, the dev pointer, and the entries counter.
         */
        spin_lock(&can_rcvlists_lock);
        hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
        spin_unlock(&can_rcvlists_lock);

        if (stats_timer) {
                /* the statistics are updated every second (timer triggered) */
                setup_timer(&can_stattimer, can_stat_update, 0);
                mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
        } else
                can_stattimer.function = NULL;

        can_init_proc();

        /* protocol register */
        sock_register(&can_family_ops);
        register_netdevice_notifier(&can_netdev_notifier);
        dev_add_pack(&can_packet);

        return 0;
}

static __exit void can_exit(void)
{
        struct dev_rcv_lists *d;
        struct hlist_node *n, *next;

        if (stats_timer)
                del_timer(&can_stattimer);

        can_remove_proc();

        /* protocol unregister */
        dev_remove_pack(&can_packet);
        unregister_netdevice_notifier(&can_netdev_notifier);
        sock_unregister(PF_CAN);

        /* remove created dev_rcv_lists from still registered CAN devices */
        spin_lock(&can_rcvlists_lock);
        hlist_del(&can_rx_alldev_list.list);
        hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
                hlist_del(&d->list);
                kfree(d);
        }
        spin_unlock(&can_rcvlists_lock);

        kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);