/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

static __initdata const char banner[] = KERN_INFO
	"can: controller area network core (" CAN_VERSION_STRING ")\n";

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

/* receive filters subscribed for 'all' CAN devices */
struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

struct timer_list can_stattimer;   /* timer for statistics update */
struct s_stats    can_stats;       /* packet statistics */
struct s_pstats   can_pstats;      /* receive list statistics */

/*
 * af_can socket functions
 */

int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {

	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);

	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL(can_ioctl);

static void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static const struct can_proto *can_get_proto(int protocol)
{
	const struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	rcu_read_unlock();

	return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/*
		 * In case of error we only print a message but don't
		 * return the error code immediately.  Below we will
		 * return -EPROTONOSUPPORT.
		 */
		if (err)
			printk_ratelimited(KERN_ERR "can: request_module "
					   "(can-proto-%d) failed.\n", protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol */
	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

 errout:
	can_put_proto(cp);
	return err;
}

/*
 * af_can tx path
 */

/**
 * can_send - transmit a CAN frame (optional with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int err;

	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (skb->dev->type != ARPHRD_CAN) {
		kfree_skb(skb);
		return -EPERM;
	}

	if (!(skb->dev->flags & IFF_UP)) {
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->protocol = htons(ETH_P_CAN);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/*
		 * The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own (e.g. can_raw sockopt CAN_RAW_RECV_OWN_MSGS).
		 * Therefore skb->sk is copied to the echo skb below.
		 */

		if (!(skb->dev->flags & IFF_ECHO)) {
			/*
			 * If the interface is not capable to do loopback
			 * itself, the frame will be echoed here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			newskb->sk = skb->sk;
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx_ni(newskb);

	/* update statistics */
	can_stats.tx_frames++;
	can_stats.tx_frames_delta++;

	return 0;
}
EXPORT_SYMBOL(can_send);
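
/*
 * Usage sketch (illustrative only, excluded from the build): how a caller
 * might allocate an skb, fill in a CAN frame and hand it to can_send() with
 * local loopback enabled.  The function name can_send_example and the
 * simplified error handling are hypothetical, not part of this module's API.
 */
#if 0
static int can_send_example(struct net_device *dev, canid_t id, u8 dlc)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_skb(sizeof(struct can_frame), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
	memset(cf, 0, sizeof(*cf));
	cf->can_id  = id;
	cf->can_dlc = dlc;	/* must be <= 8; cf->data[] stays zeroed here */

	skb->dev = dev;		/* must be a registered ARPHRD_CAN device */

	/* loop = 1: deliver the frame to local listeners as well */
	return can_send(skb, 1);
}
#endif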

/*
 * af_can rx path
 */

static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
	if (!dev)
		return &can_rx_alldev_list;
	else
		return (struct dev_rcv_lists *)dev->ml_priv;
}

/**
 * find_rcv_list - determine the optimal filter list inside the device
 *                 filter structure for a given can_id/mask filter pair
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the list within @d that a receiver with the given filter has
 *  to be added to, and reduces the filter to its canonical form:
 *
 *  - filters with CAN_ERR_FLAG set in the mask subscribe to error frames
 *    and go to the RX_ERR list
 *  - an inverted filter (CAN_INV_FILTER set in the can_id) goes to RX_INV
 *  - a zero mask matches everything and goes to RX_ALL
 *  - a filter matching exactly one non-RTR SFF identifier is hashed into
 *    the rx_sff[] array, an exact non-RTR EFF match goes to RX_EFF
 *  - everything else ends up in the generic RX_FIL list
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
					struct dev_rcv_lists *d)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in the filter entry */
		*mask &= CAN_ERR_MASK;
		return &d->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &d->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &d->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {

		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
				/* single EFF identifier */
				return &d->rx[RX_EFF];
			}
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &d->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &d->rx[RX_FIL];
}
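
/*
 * A few illustrative (can_id, can_mask) pairs and the list find_rcv_list()
 * maps them to.  The values are hypothetical examples, written here as
 * struct can_filter initializers only to document the mapping.
 */
#if 0
	/* exact SFF id 0x123 (no RTR, no EFF)      -> d->rx_sff[0x123] */
	struct can_filter f1 = { .can_id   = 0x123,
				 .can_mask = CAN_SFF_MASK | CAN_EFF_FLAG |
					     CAN_RTR_FLAG };

	/* inverted filter (everything but 0x123)   -> d->rx[RX_INV] */
	struct can_filter f2 = { .can_id   = 0x123 | CAN_INV_FILTER,
				 .can_mask = CAN_SFF_MASK };

	/* zero mask matches every frame            -> d->rx[RX_ALL] */
	struct can_filter f3 = { .can_id = 0, .can_mask = 0 };

	/* error frame subscription                 -> d->rx[RX_ERR] */
	struct can_filter f4 = { .can_id   = 0,
				 .can_mask = CAN_ERR_FLAG | CAN_ERR_MASK };
#endif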

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter.  A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long
 *  as the callback function is running.  The callback function must *not*
 *  free the given sk_buff.  When the sk_buff is needed after the end of
 *  the callback it must be cloned inside the callback with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
		    void (*func)(struct sk_buff *, void *), void *data,
		    char *ident)
{
	struct receiver *r;
	struct hlist_head *rl;
	struct dev_rcv_lists *d;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (d) {
		rl = find_rcv_list(&can_id, &mask, d);

		r->can_id  = can_id;
		r->mask    = mask;
		r->matches = 0;
		r->func    = func;
		r->data    = data;
		r->ident   = ident;

		hlist_add_head_rcu(&r->list, rl);
		d->entries++;

		can_pstats.rcv_entries++;
		if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
			can_pstats.rcv_entries_max = can_pstats.rcv_entries;
	} else {
		kmem_cache_free(rcv_cache, r);
		err = -ENODEV;
	}

	spin_unlock(&can_rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);
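
/*
 * Usage sketch (illustrative only, excluded from the build): subscribing to
 * the single standard-frame identifier 0x123 on a given device.  The callback
 * name my_rx_handler and the ident string "my-proto" are hypothetical.
 */
#if 0
static void my_rx_handler(struct sk_buff *skb, void *data)
{
	struct can_frame *cf = (struct can_frame *)skb->data;

	/* the skb is only valid for the duration of the callback */
	pr_debug("can: received id %X dlc %d\n", cf->can_id, cf->can_dlc);
}

static int my_subscribe(struct net_device *dev)
{
	/* match frames where (rx_can_id & CAN_SFF_MASK) == 0x123 */
	return can_rx_register(dev, 0x123, CAN_SFF_MASK,
			       my_rx_handler, NULL, "my-proto");
}
#endif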

/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *r = container_of(rp, struct receiver, rcu);

	kmem_cache_free(rcv_cache, r);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes the subscription entry matching the given values.
 */
void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
		       void (*func)(struct sk_buff *, void *), void *data)
{
	struct receiver *r = NULL;
	struct hlist_head *rl;
	struct hlist_node *next;
	struct dev_rcv_lists *d;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	spin_lock(&can_rcvlists_lock);

	d = find_dev_rcv_lists(dev);
	if (!d) {
		printk(KERN_ERR "BUG: receive list not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		goto out;
	}

	rl = find_rcv_list(&can_id, &mask, d);

	/*
	 * Search the receiver list for the item to delete.  This should
	 * exist, since no receiver may be unregistered that hasn't been
	 * registered before.
	 */
	hlist_for_each_entry_rcu(r, next, rl, list) {
		if (r->can_id == can_id && r->mask == mask &&
		    r->func == func && r->data == data)
			break;
	}

	/*
	 * Check for bugs in CAN protocol implementations using af_can.c:
	 * If no matching list item was found, the list cursor variable next
	 * will be NULL, while r will point to the last item of the list.
	 */
	if (!next) {
		printk(KERN_ERR "BUG: receive list entry not found for "
		       "dev %s, id %03X, mask %03X\n",
		       DNAME(dev), can_id, mask);
		r = NULL;
		goto out;
	}

	hlist_del_rcu(&r->list);
	d->entries--;

	if (can_pstats.rcv_entries > 0)
		can_pstats.rcv_entries--;

	/* remove device structure requested by NETDEV_UNREGISTER */
	if (d->remove_on_zero_entries && !d->entries) {
		kfree(d);
		dev->ml_priv = NULL;
	}

 out:
	spin_unlock(&can_rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (r)
		call_rcu(&r->rcu, can_rx_delete_receiver);
}
EXPORT_SYMBOL(can_rx_unregister);
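
/*
 * Usage sketch (illustrative only, excluded from the build): dropping the
 * subscription created in the sketch above.  The filter values and the
 * callback/data pointers must match the can_rx_register() call exactly.
 */
#if 0
static void my_unsubscribe(struct net_device *dev)
{
	can_rx_unregister(dev, 0x123, CAN_SFF_MASK, my_rx_handler, NULL);
}
#endif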
562
563static inline void deliver(struct sk_buff *skb, struct receiver *r)
564{
565 r->func(skb, r->data);
566 r->matches++;
567}
568
569static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
570{
571 struct receiver *r;
572 struct hlist_node *n;
573 int matches = 0;
574 struct can_frame *cf = (struct can_frame *)skb->data;
575 canid_t can_id = cf->can_id;
576
577 if (d->entries == 0)
578 return 0;
579
580 if (can_id & CAN_ERR_FLAG) {
581
582 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
583 if (can_id & r->mask) {
584 deliver(skb, r);
585 matches++;
586 }
587 }
588 return matches;
589 }
590
591
592 hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
593 deliver(skb, r);
594 matches++;
595 }
596
597
598 hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
599 if ((can_id & r->mask) == r->can_id) {
600 deliver(skb, r);
601 matches++;
602 }
603 }
604
605
606 hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
607 if ((can_id & r->mask) != r->can_id) {
608 deliver(skb, r);
609 matches++;
610 }
611 }
612
613
614 if (can_id & CAN_RTR_FLAG)
615 return matches;
616
617 if (can_id & CAN_EFF_FLAG) {
618 hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
619 if (r->can_id == can_id) {
620 deliver(skb, r);
621 matches++;
622 }
623 }
624 } else {
625 can_id &= CAN_SFF_MASK;
626 hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
627 deliver(skb, r);
628 matches++;
629 }
630 }
631
632 return matches;
633}
634
static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct dev_rcv_lists *d;
	struct can_frame *cf = (struct can_frame *)skb->data;
	int matches;

	if (!net_eq(dev_net(dev), &init_net))
		goto drop;

	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
		      skb->len != sizeof(struct can_frame) ||
		      cf->can_dlc > 8,
		      "PF_CAN: dropped non conform skbuf: "
		      "dev type %d, len %d, can_dlc %d\n",
		      dev->type, skb->len, cf->can_dlc))
		goto drop;

	/* update statistics */
	can_stats.rx_frames++;
	can_stats.rx_frames_delta++;

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(&can_rx_alldev_list, skb);

	/* find receive list for this device */
	d = find_dev_rcv_lists(dev);
	if (d)
		matches += can_rcv_filter(d, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		can_stats.matches++;
		can_stats.matches_delta++;
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  or the error value of proto_register()
 */
int can_proto_register(const struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		printk(KERN_ERR "can: protocol number %d out of range\n",
		       proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	mutex_lock(&proto_tab_lock);

	if (proto_tab[proto]) {
		printk(KERN_ERR "can: protocol %d already registered\n",
		       proto);
		err = -EBUSY;
	} else
		RCU_INIT_POINTER(proto_tab[proto], cp);

	mutex_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(proto_tab[proto] != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	/* give RCU protected readers of proto_tab[] time to complete */
	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
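
/*
 * Usage sketch (illustrative only, excluded from the build): the skeleton of
 * a CAN transport protocol module built on can_proto_register() and
 * can_proto_unregister().  The names myproto_*, the protocol number
 * CAN_MYPROTO and the module alias number are hypothetical placeholders;
 * a real module provides its own proto_ops and struct proto.
 */
#if 0
static const struct proto_ops myproto_ops;	/* socket operations */
static struct proto myproto_proto;		/* struct proto for sk_alloc() */

static const struct can_proto myproto_can_proto = {
	.type     = SOCK_DGRAM,
	.protocol = CAN_MYPROTO,		/* hypothetical protocol number */
	.ops      = &myproto_ops,
	.prot     = &myproto_proto,
};

static __init int myproto_module_init(void)
{
	return can_proto_register(&myproto_can_proto);
}

static __exit void myproto_module_exit(void)
{
	can_proto_unregister(&myproto_can_proto);
}

module_init(myproto_module_init);
module_exit(myproto_module_exit);
/* matches the request_module("can-proto-%d", ...) autoload in can_create() */
MODULE_ALIAS("can-proto-7");			/* hypothetical number */
#endif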

/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
			void *data)
{
	struct net_device *dev = (struct net_device *)data;
	struct dev_rcv_lists *d;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	switch (msg) {

	case NETDEV_REGISTER:

		/* create new dev_rcv_lists for this device */
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d) {
			printk(KERN_ERR
			       "can: allocation of receive list failed\n");
			return NOTIFY_DONE;
		}
		BUG_ON(dev->ml_priv);
		dev->ml_priv = d;

		break;

	case NETDEV_UNREGISTER:
		spin_lock(&can_rcvlists_lock);

		d = dev->ml_priv;
		if (d) {
			if (d->entries)
				d->remove_on_zero_entries = 1;
			else {
				kfree(d);
				dev->ml_priv = NULL;
			}
		} else
			printk(KERN_ERR "can: notifier: receive list not "
			       "found for dev %s\n", dev->name);

		spin_unlock(&can_rcvlists_lock);

		break;
	}

	return NOTIFY_DONE;
}

/*
 * af_can module init/exit functions
 */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.dev  = NULL,
	.func = can_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner  = THIS_MODULE,
};

/* notifier block for netdevice events */
static struct notifier_block can_netdev_notifier __read_mostly = {
	.notifier_call = can_notifier,
};

static __init int can_init(void)
{
	printk(banner);

	memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	if (stats_timer) {
		/* the statistics are updated every second (timer triggered) */
		setup_timer(&can_stattimer, can_stat_update, 0);
		mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
	} else
		can_stattimer.function = NULL;

	can_init_proc();

	/* protocol register */
	sock_register(&can_family_ops);
	register_netdevice_notifier(&can_netdev_notifier);
	dev_add_pack(&can_packet);

	return 0;
}

static __exit void can_exit(void)
{
	struct net_device *dev;

	if (stats_timer)
		del_timer_sync(&can_stattimer);

	can_remove_proc();

	/* protocol unregister */
	dev_remove_pack(&can_packet);
	unregister_netdevice_notifier(&can_netdev_notifier);
	sock_unregister(PF_CAN);

	/* remove created dev_rcv_lists from still registered CAN devices */
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (dev->type == ARPHRD_CAN && dev->ml_priv) {

			struct dev_rcv_lists *d = dev->ml_priv;

			BUG_ON(d->entries);
			kfree(d);
			dev->ml_priv = NULL;
		}
	}
	rcu_read_unlock();

	rcu_barrier(); /* wait for completion of call_rcu() invocations */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);