// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      NET3    Protocol independent device support routines.
 */
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <trace/events/qdisc.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/once_lite.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list while
 * pure writers can only add/delete entries on it.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

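/* Generation counter for the per-netns device list: bumped on every add,
 * remove or rename so that rtnetlink dumps can detect the list changed
 * underneath them.  The counter deliberately skips zero when it wraps.
 */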
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

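/* rps_lock()/rps_unlock() serialize access to the per-CPU input_pkt_queue.
 * Without CONFIG_RPS the queue is only touched from the local CPU with
 * interrupts disabled, so no lock is needed and these compile away.
 */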
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
414static const unsigned short netdev_lock_type[] = {
415 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
416 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
417 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
418 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
419 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
420 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
421 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
422 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
423 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
424 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
425 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
426 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
427 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
428 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
429 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
430
431static const char *const netdev_lock_name[] = {
432 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
433 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
434 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
435 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
436 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
437 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
438 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
439 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
440 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
441 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
442 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
443 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
444 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
445 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
446 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
447
448static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
449static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
450
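/* Map an ARPHRD_* device type to its slot in the tables above; unknown
 * types fall back to the last entry (ARPHRD_NONE).
 */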
451static inline unsigned short netdev_lock_pos(unsigned short dev_type)
452{
453 int i;
454
455 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
456 if (netdev_lock_type[i] == dev_type)
457 return i;
458
459 return ARRAY_SIZE(netdev_lock_type) - 1;
460}
461
462static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
463 unsigned short dev_type)
464{
465 int i;
466
467 i = netdev_lock_pos(dev_type);
468 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
469 netdev_lock_name[i]);
470}
471
472static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
473{
474 int i;
475
476 i = netdev_lock_pos(dev->type);
477 lockdep_set_class_and_name(&dev->addr_list_lock,
478 &netdev_addr_lock_key[i],
479 netdev_lock_name[i]);
480}
481#else
482static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
483 unsigned short dev_type)
484{
485}
486
487static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
488{
489}
490#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST NOT touch most of the packet off-device since they
 *	can't present this to valid skb's and can confuse things.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet offload might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
678
679
680
681
682
683
684
685
686static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
687
688
689
690
691
692
693
694
695
696
697static int netdev_boot_setup_add(char *name, struct ifmap *map)
698{
699 struct netdev_boot_setup *s;
700 int i;
701
702 s = dev_boot_setup;
703 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
704 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
705 memset(s[i].name, 0, sizeof(s[i].name));
706 strlcpy(s[i].name, name, IFNAMSIZ);
707 memcpy(&s[i].map, map, sizeof(s[i].map));
708 break;
709 }
710 }
711
712 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
713}
714
715
716
717
718
719
720
721
722
723
724int netdev_boot_setup_check(struct net_device *dev)
725{
726 struct netdev_boot_setup *s = dev_boot_setup;
727 int i;
728
729 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
730 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
731 !strcmp(dev->name, s[i].name)) {
732 dev->irq = s[i].map.irq;
733 dev->base_addr = s[i].map.base_addr;
734 dev->mem_start = s[i].map.mem_start;
735 dev->mem_end = s[i].map.mem_end;
736 return 1;
737 }
738 }
739 return 0;
740}
741EXPORT_SYMBOL(netdev_boot_setup_check);
742
743
744
745
746
747
748
749
750
751
752
753
754unsigned long netdev_boot_base(const char *prefix, int unit)
755{
756 const struct netdev_boot_setup *s = dev_boot_setup;
757 char name[IFNAMSIZ];
758 int i;
759
760 sprintf(name, "%s%d", prefix, unit);
761
762
763
764
765
766 if (__dev_get_by_name(&init_net, name))
767 return 1;
768
769 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
770 if (!strcmp(name, s[i].name))
771 return s[i].map.base_addr;
772 return 0;
773}
774
775
776
777
778int __init netdev_boot_setup(char *str)
779{
780 int ints[5];
781 struct ifmap map;
782
783 str = get_options(str, ARRAY_SIZE(ints), ints);
784 if (!str || !*str)
785 return 0;
786
787
788 memset(&map, 0, sizeof(map));
789 if (ints[0] > 0)
790 map.irq = ints[1];
791 if (ints[0] > 1)
792 map.base_addr = ints[2];
793 if (ints[0] > 2)
794 map.mem_start = ints[3];
795 if (ints[0] > 3)
796 map.mem_end = ints[4];
797
798
799 return netdev_boot_setup_add(str, &map);
800}
801
802__setup("netdev=", netdev_boot_setup);
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818int dev_get_iflink(const struct net_device *dev)
819{
820 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
821 return dev->netdev_ops->ndo_get_iflink(dev);
822
823 return dev->ifindex;
824}
825EXPORT_SYMBOL(dev_get_iflink);
826
827
828
829
830
831
832
833
834
835
836int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
837{
838 struct ip_tunnel_info *info;
839
840 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
841 return -EINVAL;
842
843 info = skb_tunnel_info_unclone(skb);
844 if (!info)
845 return -ENOMEM;
846 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
847 return -EINVAL;
848
849 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
850}
851EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
852
853static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
854{
855 int k = stack->num_paths++;
856
857 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
858 return NULL;
859
860 return &stack->path[k];
861}
862
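/* Resolve the transmit path through stacked devices (VLAN, bridge, DSA, ...)
 * starting at @dev into a flat array of hops in @stack.  Used e.g. by flow
 * offload code that needs to know the real lower device for a destination.
 */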
863int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
864 struct net_device_path_stack *stack)
865{
866 const struct net_device *last_dev;
867 struct net_device_path_ctx ctx = {
868 .dev = dev,
869 .daddr = daddr,
870 };
871 struct net_device_path *path;
872 int ret = 0;
873
874 stack->num_paths = 0;
875 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
876 last_dev = ctx.dev;
877 path = dev_fwd_path(stack);
878 if (!path)
879 return -1;
880
881 memset(path, 0, sizeof(struct net_device_path));
882 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
883 if (ret < 0)
884 return -1;
885
886 if (WARN_ON_ONCE(last_dev == ctx.dev))
887 return -1;
888 }
889 path = dev_fwd_path(stack);
890 if (!path)
891 return -1;
892 path->type = DEV_PATH_ETHERNET;
893 path->dev = ctx.dev;
894
895 return ret;
896}
897EXPORT_SYMBOL_GPL(dev_fill_forward_path);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
919
920
921
922
923
924
925
926
927
928
929
930
931
932struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
933{
934 struct netdev_name_node *node_name;
935
936 node_name = netdev_name_node_lookup_rcu(net, name);
937 return node_name ? node_name->dev : NULL;
938}
939EXPORT_SYMBOL(dev_get_by_name_rcu);
940
941
942
943
944
945
946
947
948
949
950
951
952
953struct net_device *dev_get_by_name(struct net *net, const char *name)
954{
955 struct net_device *dev;
956
957 rcu_read_lock();
958 dev = dev_get_by_name_rcu(net, name);
959 if (dev)
960 dev_hold(dev);
961 rcu_read_unlock();
962 return dev;
963}
964EXPORT_SYMBOL(dev_get_by_name);
965
966
967
968
969
970
971
972
973
974
975
976
977
978struct net_device *__dev_get_by_index(struct net *net, int ifindex)
979{
980 struct net_device *dev;
981 struct hlist_head *head = dev_index_hash(net, ifindex);
982
983 hlist_for_each_entry(dev, head, index_hlist)
984 if (dev->ifindex == ifindex)
985 return dev;
986
987 return NULL;
988}
989EXPORT_SYMBOL(__dev_get_by_index);
990
991
992
993
994
995
996
997
998
999
1000
1001
1002struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
1003{
1004 struct net_device *dev;
1005 struct hlist_head *head = dev_index_hash(net, ifindex);
1006
1007 hlist_for_each_entry_rcu(dev, head, index_hlist)
1008 if (dev->ifindex == ifindex)
1009 return dev;
1010
1011 return NULL;
1012}
1013EXPORT_SYMBOL(dev_get_by_index_rcu);
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027struct net_device *dev_get_by_index(struct net *net, int ifindex)
1028{
1029 struct net_device *dev;
1030
1031 rcu_read_lock();
1032 dev = dev_get_by_index_rcu(net, ifindex);
1033 if (dev)
1034 dev_hold(dev);
1035 rcu_read_unlock();
1036 return dev;
1037}
1038EXPORT_SYMBOL(dev_get_by_index);
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050struct net_device *dev_get_by_napi_id(unsigned int napi_id)
1051{
1052 struct napi_struct *napi;
1053
1054 WARN_ON_ONCE(!rcu_read_lock_held());
1055
1056 if (napi_id < MIN_NAPI_ID)
1057 return NULL;
1058
1059 napi = napi_by_id(napi_id);
1060
1061 return napi ? napi->dev : NULL;
1062}
1063EXPORT_SYMBOL(dev_get_by_napi_id);
1064
1065
1066
1067
1068
1069
1070
1071int netdev_get_name(struct net *net, char *name, int ifindex)
1072{
1073 struct net_device *dev;
1074 int ret;
1075
1076 down_read(&devnet_rename_sem);
1077 rcu_read_lock();
1078
1079 dev = dev_get_by_index_rcu(net, ifindex);
1080 if (!dev) {
1081 ret = -ENODEV;
1082 goto out;
1083 }
1084
1085 strcpy(name, dev->name);
1086
1087 ret = 0;
1088out:
1089 rcu_read_unlock();
1090 up_read(&devnet_rename_sem);
1091 return ret;
1092}
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1109 const char *ha)
1110{
1111 struct net_device *dev;
1112
1113 for_each_netdev_rcu(net, dev)
1114 if (dev->type == type &&
1115 !memcmp(dev->dev_addr, ha, dev->addr_len))
1116 return dev;
1117
1118 return NULL;
1119}
1120EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
1121
1122struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
1123{
1124 struct net_device *dev, *ret = NULL;
1125
1126 rcu_read_lock();
1127 for_each_netdev_rcu(net, dev)
1128 if (dev->type == type) {
1129 dev_hold(dev);
1130 ret = dev;
1131 break;
1132 }
1133 rcu_read_unlock();
1134 return ret;
1135}
1136EXPORT_SYMBOL(dev_getfirstbyhwtype);
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1150 unsigned short mask)
1151{
1152 struct net_device *dev, *ret;
1153
1154 ASSERT_RTNL();
1155
1156 ret = NULL;
1157 for_each_netdev(net, dev) {
1158 if (((dev->flags ^ if_flags) & mask) == 0) {
1159 ret = dev;
1160 break;
1161 }
1162 }
1163 return ret;
1164}
1165EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1209{
1210 int i = 0;
1211 const char *p;
1212 const int max_netdevices = 8*PAGE_SIZE;
1213 unsigned long *inuse;
1214 struct net_device *d;
1215
1216 if (!dev_valid_name(name))
1217 return -EINVAL;
1218
1219 p = strchr(name, '%');
1220 if (p) {
1221
1222
1223
1224
1225
1226 if (p[1] != 'd' || strchr(p + 2, '%'))
1227 return -EINVAL;
1228
1229
1230 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1231 if (!inuse)
1232 return -ENOMEM;
1233
1234 for_each_netdev(net, d) {
1235 struct netdev_name_node *name_node;
1236 list_for_each_entry(name_node, &d->name_node->list, list) {
1237 if (!sscanf(name_node->name, name, &i))
1238 continue;
1239 if (i < 0 || i >= max_netdevices)
1240 continue;
1241
1242
1243 snprintf(buf, IFNAMSIZ, name, i);
1244 if (!strncmp(buf, name_node->name, IFNAMSIZ))
1245 set_bit(i, inuse);
1246 }
1247 if (!sscanf(d->name, name, &i))
1248 continue;
1249 if (i < 0 || i >= max_netdevices)
1250 continue;
1251
1252
1253 snprintf(buf, IFNAMSIZ, name, i);
1254 if (!strncmp(buf, d->name, IFNAMSIZ))
1255 set_bit(i, inuse);
1256 }
1257
1258 i = find_first_zero_bit(inuse, max_netdevices);
1259 free_page((unsigned long) inuse);
1260 }
1261
1262 snprintf(buf, IFNAMSIZ, name, i);
1263 if (!__dev_get_by_name(net, buf))
1264 return i;
1265
1266
1267
1268
1269
1270 return -ENFILE;
1271}
1272
1273static int dev_alloc_name_ns(struct net *net,
1274 struct net_device *dev,
1275 const char *name)
1276{
1277 char buf[IFNAMSIZ];
1278 int ret;
1279
1280 BUG_ON(!net);
1281 ret = __dev_alloc_name(net, name, buf);
1282 if (ret >= 0)
1283 strlcpy(dev->name, buf, IFNAMSIZ);
1284 return ret;
1285}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
1306
1307static int dev_get_valid_name(struct net *net, struct net_device *dev,
1308 const char *name)
1309{
1310 BUG_ON(!net);
1311
1312 if (!dev_valid_name(name))
1313 return -EINVAL;
1314
1315 if (strchr(name, '%'))
1316 return dev_alloc_name_ns(net, dev, name);
1317 else if (__dev_get_by_name(net, name))
1318 return -EEXIST;
1319 else if (dev->name != name)
1320 strlcpy(dev->name, name, IFNAMSIZ);
1321
1322 return 0;
1323}
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333int dev_change_name(struct net_device *dev, const char *newname)
1334{
1335 unsigned char old_assign_type;
1336 char oldname[IFNAMSIZ];
1337 int err = 0;
1338 int ret;
1339 struct net *net;
1340
1341 ASSERT_RTNL();
1342 BUG_ON(!dev_net(dev));
1343
1344 net = dev_net(dev);
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358 if (dev->flags & IFF_UP &&
1359 likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
1360 return -EBUSY;
1361
1362 down_write(&devnet_rename_sem);
1363
1364 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1365 up_write(&devnet_rename_sem);
1366 return 0;
1367 }
1368
1369 memcpy(oldname, dev->name, IFNAMSIZ);
1370
1371 err = dev_get_valid_name(net, dev, newname);
1372 if (err < 0) {
1373 up_write(&devnet_rename_sem);
1374 return err;
1375 }
1376
1377 if (oldname[0] && !strchr(oldname, '%'))
1378 netdev_info(dev, "renamed from %s\n", oldname);
1379
1380 old_assign_type = dev->name_assign_type;
1381 dev->name_assign_type = NET_NAME_RENAMED;
1382
1383rollback:
1384 ret = device_rename(&dev->dev, dev->name);
1385 if (ret) {
1386 memcpy(dev->name, oldname, IFNAMSIZ);
1387 dev->name_assign_type = old_assign_type;
1388 up_write(&devnet_rename_sem);
1389 return ret;
1390 }
1391
1392 up_write(&devnet_rename_sem);
1393
1394 netdev_adjacent_rename_links(dev, oldname);
1395
1396 write_lock_bh(&dev_base_lock);
1397 netdev_name_node_del(dev->name_node);
1398 write_unlock_bh(&dev_base_lock);
1399
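	/* The old name has been removed from the name hash; wait for any
	 * in-flight RCU lookups walking the old hash chain to finish before
	 * the node is re-inserted under the new name.
	 */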
1400 synchronize_rcu();
1401
1402 write_lock_bh(&dev_base_lock);
1403 netdev_name_node_add(net, dev->name_node);
1404 write_unlock_bh(&dev_base_lock);
1405
1406 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1407 ret = notifier_to_errno(ret);
1408
1409 if (ret) {
1410
1411 if (err >= 0) {
1412 err = ret;
1413 down_write(&devnet_rename_sem);
1414 memcpy(dev->name, oldname, IFNAMSIZ);
1415 memcpy(oldname, newname, IFNAMSIZ);
1416 dev->name_assign_type = old_assign_type;
1417 old_assign_type = NET_NAME_RENAMED;
1418 goto rollback;
1419 } else {
1420 pr_err("%s: name change rollback failed: %d\n",
1421 dev->name, ret);
1422 }
1423 }
1424
1425 return err;
1426}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1437{
1438 struct dev_ifalias *new_alias = NULL;
1439
1440 if (len >= IFALIASZ)
1441 return -EINVAL;
1442
1443 if (len) {
1444 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1445 if (!new_alias)
1446 return -ENOMEM;
1447
1448 memcpy(new_alias->ifalias, alias, len);
1449 new_alias->ifalias[len] = 0;
1450 }
1451
1452 mutex_lock(&ifalias_mutex);
1453 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1454 mutex_is_locked(&ifalias_mutex));
1455 mutex_unlock(&ifalias_mutex);
1456
1457 if (new_alias)
1458 kfree_rcu(new_alias, rcuhead);
1459
1460 return len;
1461}
1462EXPORT_SYMBOL(dev_set_alias);
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1474{
1475 const struct dev_ifalias *alias;
1476 int ret = 0;
1477
1478 rcu_read_lock();
1479 alias = rcu_dereference(dev->ifalias);
1480 if (alias)
1481 ret = snprintf(name, len, "%s", alias->ifalias);
1482 rcu_read_unlock();
1483
1484 return ret;
1485}
1486
1487
1488
1489
1490
1491
1492
1493void netdev_features_change(struct net_device *dev)
1494{
1495 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1496}
1497EXPORT_SYMBOL(netdev_features_change);
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507void netdev_state_change(struct net_device *dev)
1508{
1509 if (dev->flags & IFF_UP) {
1510 struct netdev_notifier_change_info change_info = {
1511 .info.dev = dev,
1512 };
1513
1514 call_netdevice_notifiers_info(NETDEV_CHANGE,
1515 &change_info.info);
1516 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1517 }
1518}
1519EXPORT_SYMBOL(netdev_state_change);
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532void __netdev_notify_peers(struct net_device *dev)
1533{
1534 ASSERT_RTNL();
1535 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1536 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1537}
1538EXPORT_SYMBOL(__netdev_notify_peers);
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550void netdev_notify_peers(struct net_device *dev)
1551{
1552 rtnl_lock();
1553 __netdev_notify_peers(dev);
1554 rtnl_unlock();
1555}
1556EXPORT_SYMBOL(netdev_notify_peers);
1557
1558static int napi_threaded_poll(void *data);
1559
1560static int napi_kthread_create(struct napi_struct *n)
1561{
1562 int err = 0;
1563
1564
1565
1566
1567
1568 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1569 n->dev->name, n->napi_id);
1570 if (IS_ERR(n->thread)) {
1571 err = PTR_ERR(n->thread);
1572 pr_err("kthread_run failed with err %d\n", err);
1573 n->thread = NULL;
1574 }
1575
1576 return err;
1577}
1578
1579static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1580{
1581 const struct net_device_ops *ops = dev->netdev_ops;
1582 int ret;
1583
1584 ASSERT_RTNL();
1585
1586 if (!netif_device_present(dev)) {
1587
1588 if (dev->dev.parent)
1589 pm_runtime_resume(dev->dev.parent);
1590 if (!netif_device_present(dev))
1591 return -ENODEV;
1592 }
1593
1594
1595
1596
1597
1598 netpoll_poll_disable(dev);
1599
1600 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1601 ret = notifier_to_errno(ret);
1602 if (ret)
1603 return ret;
1604
1605 set_bit(__LINK_STATE_START, &dev->state);
1606
1607 if (ops->ndo_validate_addr)
1608 ret = ops->ndo_validate_addr(dev);
1609
1610 if (!ret && ops->ndo_open)
1611 ret = ops->ndo_open(dev);
1612
1613 netpoll_poll_enable(dev);
1614
1615 if (ret)
1616 clear_bit(__LINK_STATE_START, &dev->state);
1617 else {
1618 dev->flags |= IFF_UP;
1619 dev_set_rx_mode(dev);
1620 dev_activate(dev);
1621 add_device_randomness(dev->dev_addr, dev->addr_len);
1622 }
1623
1624 return ret;
1625}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
1657
1658static void __dev_close_many(struct list_head *head)
1659{
1660 struct net_device *dev;
1661
1662 ASSERT_RTNL();
1663 might_sleep();
1664
1665 list_for_each_entry(dev, head, close_list) {
1666
1667 netpoll_poll_disable(dev);
1668
1669 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1670
1671 clear_bit(__LINK_STATE_START, &dev->state);
		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
1680 }
1681
1682 dev_deactivate_many(head);
1683
1684 list_for_each_entry(dev, head, close_list) {
1685 const struct net_device_ops *ops = dev->netdev_ops;
1686
1687
1688
1689
1690
1691
1692
1693
1694 if (ops->ndo_stop)
1695 ops->ndo_stop(dev);
1696
1697 dev->flags &= ~IFF_UP;
1698 netpoll_poll_enable(dev);
1699 }
1700}
1701
1702static void __dev_close(struct net_device *dev)
1703{
1704 LIST_HEAD(single);
1705
1706 list_add(&dev->close_list, &single);
1707 __dev_close_many(&single);
1708 list_del(&single);
1709}
1710
1711void dev_close_many(struct list_head *head, bool unlink)
1712{
1713 struct net_device *dev, *tmp;
1714
1715
1716 list_for_each_entry_safe(dev, tmp, head, close_list)
1717 if (!(dev->flags & IFF_UP))
1718 list_del_init(&dev->close_list);
1719
1720 __dev_close_many(head);
1721
1722 list_for_each_entry_safe(dev, tmp, head, close_list) {
1723 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1724 call_netdevice_notifiers(NETDEV_DOWN, dev);
1725 if (unlink)
1726 list_del_init(&dev->close_list);
1727 }
1728}
1729EXPORT_SYMBOL(dev_close_many);
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740void dev_close(struct net_device *dev)
1741{
1742 if (dev->flags & IFF_UP) {
1743 LIST_HEAD(single);
1744
1745 list_add(&dev->close_list, &single);
1746 dev_close_many(&single, true);
1747 list_del(&single);
1748 }
1749}
1750EXPORT_SYMBOL(dev_close);
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761void dev_disable_lro(struct net_device *dev)
1762{
1763 struct net_device *lower_dev;
1764 struct list_head *iter;
1765
1766 dev->wanted_features &= ~NETIF_F_LRO;
1767 netdev_update_features(dev);
1768
1769 if (unlikely(dev->features & NETIF_F_LRO))
1770 netdev_WARN(dev, "failed to disable LRO!\n");
1771
1772 netdev_for_each_lower_dev(dev, lower_dev, iter)
1773 dev_disable_lro(lower_dev);
1774}
1775EXPORT_SYMBOL(dev_disable_lro);
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785static void dev_disable_gro_hw(struct net_device *dev)
1786{
1787 dev->wanted_features &= ~NETIF_F_GRO_HW;
1788 netdev_update_features(dev);
1789
1790 if (unlikely(dev->features & NETIF_F_GRO_HW))
1791 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1792}
1793
1794const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1795{
1796#define N(val) \
1797 case NETDEV_##val: \
1798 return "NETDEV_" __stringify(val);
1799 switch (cmd) {
1800 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1801 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1802 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1803 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1804 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1805 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1806 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1807 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1808 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1809 N(PRE_CHANGEADDR)
1810 }
1811#undef N
1812 return "UNKNOWN_NETDEV_EVENT";
1813}
1814EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1815
1816static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1817 struct net_device *dev)
1818{
1819 struct netdev_notifier_info info = {
1820 .dev = dev,
1821 };
1822
1823 return nb->notifier_call(nb, val, &info);
1824}
1825
1826static int call_netdevice_register_notifiers(struct notifier_block *nb,
1827 struct net_device *dev)
1828{
1829 int err;
1830
1831 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1832 err = notifier_to_errno(err);
1833 if (err)
1834 return err;
1835
1836 if (!(dev->flags & IFF_UP))
1837 return 0;
1838
1839 call_netdevice_notifier(nb, NETDEV_UP, dev);
1840 return 0;
1841}
1842
1843static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1844 struct net_device *dev)
1845{
1846 if (dev->flags & IFF_UP) {
1847 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1848 dev);
1849 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1850 }
1851 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1852}
1853
1854static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1855 struct net *net)
1856{
1857 struct net_device *dev;
1858 int err;
1859
1860 for_each_netdev(net, dev) {
1861 err = call_netdevice_register_notifiers(nb, dev);
1862 if (err)
1863 goto rollback;
1864 }
1865 return 0;
1866
1867rollback:
1868 for_each_netdev_continue_reverse(net, dev)
1869 call_netdevice_unregister_notifiers(nb, dev);
1870 return err;
1871}
1872
1873static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1874 struct net *net)
1875{
1876 struct net_device *dev;
1877
1878 for_each_netdev(net, dev)
1879 call_netdevice_unregister_notifiers(nb, dev);
1880}
1881
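/* Cleared by net_dev_init(); while still set, notifier registration skips
 * replaying NETDEV_REGISTER/NETDEV_UP for existing devices because none
 * have been registered yet.
 */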
1882static int dev_boot_phase = 1;
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898int register_netdevice_notifier(struct notifier_block *nb)
1899{
1900 struct net *net;
1901 int err;
1902
1903
1904 down_write(&pernet_ops_rwsem);
1905 rtnl_lock();
1906 err = raw_notifier_chain_register(&netdev_chain, nb);
1907 if (err)
1908 goto unlock;
1909 if (dev_boot_phase)
1910 goto unlock;
1911 for_each_net(net) {
1912 err = call_netdevice_register_net_notifiers(nb, net);
1913 if (err)
1914 goto rollback;
1915 }
1916
1917unlock:
1918 rtnl_unlock();
1919 up_write(&pernet_ops_rwsem);
1920 return err;
1921
1922rollback:
1923 for_each_net_continue_reverse(net)
1924 call_netdevice_unregister_net_notifiers(nb, net);
1925
1926 raw_notifier_chain_unregister(&netdev_chain, nb);
1927 goto unlock;
1928}
1929EXPORT_SYMBOL(register_netdevice_notifier);
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945int unregister_netdevice_notifier(struct notifier_block *nb)
1946{
1947 struct net *net;
1948 int err;
1949
1950
1951 down_write(&pernet_ops_rwsem);
1952 rtnl_lock();
1953 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1954 if (err)
1955 goto unlock;
1956
1957 for_each_net(net)
1958 call_netdevice_unregister_net_notifiers(nb, net);
1959
1960unlock:
1961 rtnl_unlock();
1962 up_write(&pernet_ops_rwsem);
1963 return err;
1964}
1965EXPORT_SYMBOL(unregister_netdevice_notifier);
1966
1967static int __register_netdevice_notifier_net(struct net *net,
1968 struct notifier_block *nb,
1969 bool ignore_call_fail)
1970{
1971 int err;
1972
1973 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1974 if (err)
1975 return err;
1976 if (dev_boot_phase)
1977 return 0;
1978
1979 err = call_netdevice_register_net_notifiers(nb, net);
1980 if (err && !ignore_call_fail)
1981 goto chain_unregister;
1982
1983 return 0;
1984
1985chain_unregister:
1986 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1987 return err;
1988}
1989
1990static int __unregister_netdevice_notifier_net(struct net *net,
1991 struct notifier_block *nb)
1992{
1993 int err;
1994
1995 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1996 if (err)
1997 return err;
1998
1999 call_netdevice_unregister_net_notifiers(nb, net);
2000 return 0;
2001}
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
2019{
2020 int err;
2021
2022 rtnl_lock();
2023 err = __register_netdevice_notifier_net(net, nb, false);
2024 rtnl_unlock();
2025 return err;
2026}
2027EXPORT_SYMBOL(register_netdevice_notifier_net);
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045int unregister_netdevice_notifier_net(struct net *net,
2046 struct notifier_block *nb)
2047{
2048 int err;
2049
2050 rtnl_lock();
2051 err = __unregister_netdevice_notifier_net(net, nb);
2052 rtnl_unlock();
2053 return err;
2054}
2055EXPORT_SYMBOL(unregister_netdevice_notifier_net);
2056
2057int register_netdevice_notifier_dev_net(struct net_device *dev,
2058 struct notifier_block *nb,
2059 struct netdev_net_notifier *nn)
2060{
2061 int err;
2062
2063 rtnl_lock();
2064 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
2065 if (!err) {
2066 nn->nb = nb;
2067 list_add(&nn->list, &dev->net_notifier_list);
2068 }
2069 rtnl_unlock();
2070 return err;
2071}
2072EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
2073
2074int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2075 struct notifier_block *nb,
2076 struct netdev_net_notifier *nn)
2077{
2078 int err;
2079
2080 rtnl_lock();
2081 list_del(&nn->list);
2082 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
2083 rtnl_unlock();
2084 return err;
2085}
2086EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
2087
2088static void move_netdevice_notifiers_dev_net(struct net_device *dev,
2089 struct net *net)
2090{
2091 struct netdev_net_notifier *nn;
2092
2093 list_for_each_entry(nn, &dev->net_notifier_list, list) {
2094 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
2095 __register_netdevice_notifier_net(net, nn->nb, true);
2096 }
2097}
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108static int call_netdevice_notifiers_info(unsigned long val,
2109 struct netdev_notifier_info *info)
2110{
2111 struct net *net = dev_net(info->dev);
2112 int ret;
2113
2114 ASSERT_RTNL();
2115
2116
2117
2118
2119
2120 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2121 if (ret & NOTIFY_STOP_MASK)
2122 return ret;
2123 return raw_notifier_call_chain(&netdev_chain, val, info);
2124}
2125
2126static int call_netdevice_notifiers_extack(unsigned long val,
2127 struct net_device *dev,
2128 struct netlink_ext_ack *extack)
2129{
2130 struct netdev_notifier_info info = {
2131 .dev = dev,
2132 .extack = extack,
2133 };
2134
2135 return call_netdevice_notifiers_info(val, &info);
2136}
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2148{
2149 return call_netdevice_notifiers_extack(val, dev, NULL);
2150}
2151EXPORT_SYMBOL(call_netdevice_notifiers);
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162static int call_netdevice_notifiers_mtu(unsigned long val,
2163 struct net_device *dev, u32 arg)
2164{
2165 struct netdev_notifier_info_ext info = {
2166 .info.dev = dev,
2167 .ext.mtu = arg,
2168 };
2169
2170 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2171
2172 return call_netdevice_notifiers_info(val, &info.info);
2173}
2174
2175#ifdef CONFIG_NET_INGRESS
2176static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2177
2178void net_inc_ingress_queue(void)
2179{
2180 static_branch_inc(&ingress_needed_key);
2181}
2182EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2183
2184void net_dec_ingress_queue(void)
2185{
2186 static_branch_dec(&ingress_needed_key);
2187}
2188EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2189#endif
2190
2191#ifdef CONFIG_NET_EGRESS
2192static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2193
2194void net_inc_egress_queue(void)
2195{
2196 static_branch_inc(&egress_needed_key);
2197}
2198EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2199
2200void net_dec_egress_queue(void)
2201{
2202 static_branch_dec(&egress_needed_key);
2203}
2204EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2205#endif
2206
2207static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2208#ifdef CONFIG_JUMP_LABEL
2209static atomic_t netstamp_needed_deferred;
2210static atomic_t netstamp_wanted;
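/* Flipping a static key may sleep, but net_enable_timestamp() and
 * net_disable_timestamp() can be called in atomic context, so the
 * enable/disable requests are counted here and applied from a work item.
 */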
2211static void netstamp_clear(struct work_struct *work)
2212{
2213 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2214 int wanted;
2215
2216 wanted = atomic_add_return(deferred, &netstamp_wanted);
2217 if (wanted > 0)
2218 static_branch_enable(&netstamp_needed_key);
2219 else
2220 static_branch_disable(&netstamp_needed_key);
2221}
2222static DECLARE_WORK(netstamp_work, netstamp_clear);
2223#endif
2224
2225void net_enable_timestamp(void)
2226{
2227#ifdef CONFIG_JUMP_LABEL
2228 int wanted;
2229
2230 while (1) {
2231 wanted = atomic_read(&netstamp_wanted);
2232 if (wanted <= 0)
2233 break;
2234 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2235 return;
2236 }
2237 atomic_inc(&netstamp_needed_deferred);
2238 schedule_work(&netstamp_work);
2239#else
2240 static_branch_inc(&netstamp_needed_key);
2241#endif
2242}
2243EXPORT_SYMBOL(net_enable_timestamp);
2244
2245void net_disable_timestamp(void)
2246{
2247#ifdef CONFIG_JUMP_LABEL
2248 int wanted;
2249
2250 while (1) {
2251 wanted = atomic_read(&netstamp_wanted);
2252 if (wanted <= 1)
2253 break;
2254 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2255 return;
2256 }
2257 atomic_dec(&netstamp_needed_deferred);
2258 schedule_work(&netstamp_work);
2259#else
2260 static_branch_dec(&netstamp_needed_key);
2261#endif
2262}
2263EXPORT_SYMBOL(net_disable_timestamp);
2264
2265static inline void net_timestamp_set(struct sk_buff *skb)
2266{
2267 skb->tstamp = 0;
2268 if (static_branch_unlikely(&netstamp_needed_key))
2269 __net_timestamp(skb);
2270}
2271
2272#define net_timestamp_check(COND, SKB) \
2273 if (static_branch_unlikely(&netstamp_needed_key)) { \
2274 if ((COND) && !(SKB)->tstamp) \
2275 __net_timestamp(SKB); \
2276 } \
2277
2278bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2279{
2280 return __is_skb_forwardable(dev, skb, true);
2281}
2282EXPORT_SYMBOL_GPL(is_skb_forwardable);
2283
2284static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2285 bool check_mtu)
2286{
2287 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2288
2289 if (likely(!ret)) {
2290 skb->protocol = eth_type_trans(skb, dev);
2291 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2292 }
2293
2294 return ret;
2295}
2296
2297int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2298{
2299 return __dev_forward_skb2(dev, skb, true);
2300}
2301EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
2326
2327int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2328{
2329 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2330}
2331
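/* Hand an skb to one packet_type handler, taking an extra reference on the
 * skb so the handler owns its own refcount; zerocopy user frags are copied
 * out first via skb_orphan_frags_rx().
 */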
2332static inline int deliver_skb(struct sk_buff *skb,
2333 struct packet_type *pt_prev,
2334 struct net_device *orig_dev)
2335{
2336 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2337 return -ENOMEM;
2338 refcount_inc(&skb->users);
2339 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2340}
2341
2342static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2343 struct packet_type **pt,
2344 struct net_device *orig_dev,
2345 __be16 type,
2346 struct list_head *ptype_list)
2347{
2348 struct packet_type *ptype, *pt_prev = *pt;
2349
2350 list_for_each_entry_rcu(ptype, ptype_list, list) {
2351 if (ptype->type != type)
2352 continue;
2353 if (pt_prev)
2354 deliver_skb(skb, pt_prev, orig_dev);
2355 pt_prev = ptype;
2356 }
2357 *pt = pt_prev;
2358}
2359
2360static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2361{
2362 if (!ptype->af_packet_priv || !skb->sk)
2363 return false;
2364
2365 if (ptype->id_match)
2366 return ptype->id_match(ptype, skb->sk);
2367 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2368 return true;
2369
2370 return false;
2371}

/**
 * dev_nit_active - return true if any network interface taps are in use
 *
 * @dev: network device to check for the presence of taps
 */
bool dev_nit_active(struct net_device *dev)
{
	return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
}
EXPORT_SYMBOL_GPL(dev_nit_active);
2383
2384
2385
2386
2387
2388
2389void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2390{
2391 struct packet_type *ptype;
2392 struct sk_buff *skb2 = NULL;
2393 struct packet_type *pt_prev = NULL;
2394 struct list_head *ptype_list = &ptype_all;
2395
2396 rcu_read_lock();
2397again:
2398 list_for_each_entry_rcu(ptype, ptype_list, list) {
2399 if (ptype->ignore_outgoing)
2400 continue;
2401
2402
2403
2404
2405 if (skb_loop_sk(ptype, skb))
2406 continue;
2407
2408 if (pt_prev) {
2409 deliver_skb(skb2, pt_prev, skb->dev);
2410 pt_prev = ptype;
2411 continue;
2412 }
2413
2414
2415 skb2 = skb_clone(skb, GFP_ATOMIC);
2416 if (!skb2)
2417 goto out_unlock;
2418
2419 net_timestamp_set(skb2);
2420
2421
2422
2423
2424
2425 skb_reset_mac_header(skb2);
2426
2427 if (skb_network_header(skb2) < skb2->data ||
2428 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2429 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2430 ntohs(skb2->protocol),
2431 dev->name);
2432 skb_reset_network_header(skb2);
2433 }
2434
2435 skb2->transport_header = skb2->network_header;
2436 skb2->pkt_type = PACKET_OUTGOING;
2437 pt_prev = ptype;
2438 }
2439
2440 if (ptype_list == &ptype_all) {
2441 ptype_list = &dev->ptype_all;
2442 goto again;
2443 }
2444out_unlock:
2445 if (pt_prev) {
2446 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2447 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2448 else
2449 kfree_skb(skb2);
2450 }
2451 rcu_read_unlock();
2452}
2453EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2469{
2470 int i;
2471 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2472
2473
2474 if (tc->offset + tc->count > txq) {
2475 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2476 dev->num_tc = 0;
2477 return;
2478 }
2479
2480
2481 for (i = 1; i < TC_BITMASK + 1; i++) {
2482 int q = netdev_get_prio_tc_map(dev, i);
2483
2484 tc = &dev->tc_to_txq[q];
2485 if (tc->offset + tc->count > txq) {
2486 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2487 i, q);
2488 netdev_set_prio_tc_map(dev, i, 0);
2489 }
2490 }
2491}
2492
2493int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2494{
2495 if (dev->num_tc) {
2496 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2497 int i;
2498
2499
2500 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2501 if ((txq - tc->offset) < tc->count)
2502 return i;
2503 }
2504
2505
2506 return -1;
2507 }
2508
2509 return 0;
2510}
2511EXPORT_SYMBOL(netdev_txq_to_tc);
2512
2513#ifdef CONFIG_XPS
2514static struct static_key xps_needed __read_mostly;
2515static struct static_key xps_rxqs_needed __read_mostly;
2516static DEFINE_MUTEX(xps_map_mutex);
2517#define xmap_dereference(P) \
2518 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2519
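/* Remove queue @index from the XPS map at slot @tci, if present.  Returns
 * false when the map did not exist or was freed because it became empty,
 * true when a (possibly shrunk) map remains.
 */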
2520static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2521 struct xps_dev_maps *old_maps, int tci, u16 index)
2522{
2523 struct xps_map *map = NULL;
2524 int pos;
2525
2526 if (dev_maps)
2527 map = xmap_dereference(dev_maps->attr_map[tci]);
2528 if (!map)
2529 return false;
2530
2531 for (pos = map->len; pos--;) {
2532 if (map->queues[pos] != index)
2533 continue;
2534
2535 if (map->len > 1) {
2536 map->queues[pos] = map->queues[--map->len];
2537 break;
2538 }
2539
2540 if (old_maps)
2541 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2542 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2543 kfree_rcu(map, rcu);
2544 return false;
2545 }
2546
2547 return true;
2548}
2549
2550static bool remove_xps_queue_cpu(struct net_device *dev,
2551 struct xps_dev_maps *dev_maps,
2552 int cpu, u16 offset, u16 count)
2553{
2554 int num_tc = dev_maps->num_tc;
2555 bool active = false;
2556 int tci;
2557
2558 for (tci = cpu * num_tc; num_tc--; tci++) {
2559 int i, j;
2560
2561 for (i = count, j = offset; i--; j++) {
2562 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2563 break;
2564 }
2565
2566 active |= i < 0;
2567 }
2568
2569 return active;
2570}
2571
2572static void reset_xps_maps(struct net_device *dev,
2573 struct xps_dev_maps *dev_maps,
2574 enum xps_map_type type)
2575{
2576 static_key_slow_dec_cpuslocked(&xps_needed);
2577 if (type == XPS_RXQS)
2578 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2579
2580 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2581
2582 kfree_rcu(dev_maps, rcu);
2583}
2584
2585static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2586 u16 offset, u16 count)
2587{
2588 struct xps_dev_maps *dev_maps;
2589 bool active = false;
2590 int i, j;
2591
2592 dev_maps = xmap_dereference(dev->xps_maps[type]);
2593 if (!dev_maps)
2594 return;
2595
2596 for (j = 0; j < dev_maps->nr_ids; j++)
2597 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2598 if (!active)
2599 reset_xps_maps(dev, dev_maps, type);
2600
2601 if (type == XPS_CPUS) {
2602 for (i = offset + (count - 1); count--; i--)
2603 netdev_queue_numa_node_write(
2604 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2605 }
2606}
2607
2608static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2609 u16 count)
2610{
2611 if (!static_key_false(&xps_needed))
2612 return;
2613
2614 cpus_read_lock();
2615 mutex_lock(&xps_map_mutex);
2616
2617 if (static_key_false(&xps_rxqs_needed))
2618 clean_xps_maps(dev, XPS_RXQS, offset, count);
2619
2620 clean_xps_maps(dev, XPS_CPUS, offset, count);
2621
2622 mutex_unlock(&xps_map_mutex);
2623 cpus_read_unlock();
2624}
2625
2626static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2627{
2628 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2629}
2630
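/* Return a map that can hold queue @index: the existing map if the queue is
 * already present or there is spare room, otherwise a new allocation with
 * double the capacity (NUMA-local to the CPU for CPU maps).
 */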
2631static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2632 u16 index, bool is_rxqs_map)
2633{
2634 struct xps_map *new_map;
2635 int alloc_len = XPS_MIN_MAP_ALLOC;
2636 int i, pos;
2637
2638 for (pos = 0; map && pos < map->len; pos++) {
2639 if (map->queues[pos] != index)
2640 continue;
2641 return map;
2642 }
2643
2644
2645 if (map) {
2646 if (pos < map->alloc_len)
2647 return map;
2648
2649 alloc_len = map->alloc_len * 2;
2650 }
2651
2652
2653
2654
2655 if (is_rxqs_map)
2656 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2657 else
2658 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2659 cpu_to_node(attr_index));
2660 if (!new_map)
2661 return NULL;
2662
2663 for (i = 0; i < pos; i++)
2664 new_map->queues[i] = map->queues[i];
2665 new_map->alloc_len = alloc_len;
2666 new_map->len = pos;
2667
2668 return new_map;
2669}
2670
2671
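/* Copy the per-tc maps for CPU / rx queue @index from the old device maps
 * into the new ones, skipping traffic class @tc when @skip_tc is set (that
 * slot is being rebuilt by the caller).
 */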
2672static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2673 struct xps_dev_maps *new_dev_maps, int index,
2674 int tc, bool skip_tc)
2675{
2676 int i, tci = index * dev_maps->num_tc;
2677 struct xps_map *map;
2678
2679
2680 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2681 if (i == tc && skip_tc)
2682 continue;
2683
2684
2685 map = xmap_dereference(dev_maps->attr_map[tci]);
2686 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2687 }
2688}
2689
2690
2691int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2692 u16 index, enum xps_map_type type)
2693{
2694 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2695 const unsigned long *online_mask = NULL;
2696 bool active = false, copy = false;
2697 int i, j, tci, numa_node_id = -2;
2698 int maps_sz, num_tc = 1, tc = 0;
2699 struct xps_map *map, *new_map;
2700 unsigned int nr_ids;
2701
2702 if (dev->num_tc) {
2703
2704 num_tc = dev->num_tc;
2705 if (num_tc < 0)
2706 return -EINVAL;
2707
2708
2709 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2710
2711 tc = netdev_txq_to_tc(dev, index);
2712 if (tc < 0)
2713 return -EINVAL;
2714 }
2715
2716 mutex_lock(&xps_map_mutex);
2717
2718 dev_maps = xmap_dereference(dev->xps_maps[type]);
2719 if (type == XPS_RXQS) {
2720 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2721 nr_ids = dev->num_rx_queues;
2722 } else {
2723 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2724 if (num_possible_cpus() > 1)
2725 online_mask = cpumask_bits(cpu_online_mask);
2726 nr_ids = nr_cpu_ids;
2727 }
2728
2729 if (maps_sz < L1_CACHE_BYTES)
2730 maps_sz = L1_CACHE_BYTES;
2731
2732
2733
2734
2735
2736
2737 if (dev_maps &&
2738 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2739 copy = true;
2740
2741
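	/* Allocate (or reuse) a queue map for every id that is both set in the
	 * supplied mask and currently online, growing the per-tc maps as needed.
	 */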
2742 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2743 j < nr_ids;) {
2744 if (!new_dev_maps) {
2745 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2746 if (!new_dev_maps) {
2747 mutex_unlock(&xps_map_mutex);
2748 return -ENOMEM;
2749 }
2750
2751 new_dev_maps->nr_ids = nr_ids;
2752 new_dev_maps->num_tc = num_tc;
2753 }
2754
2755 tci = j * num_tc + tc;
2756 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2757
2758 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2759 if (!map)
2760 goto error;
2761
2762 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2763 }
2764
2765 if (!new_dev_maps)
2766 goto out_no_new_maps;
2767
2768 if (!dev_maps) {
2769
2770 static_key_slow_inc_cpuslocked(&xps_needed);
2771 if (type == XPS_RXQS)
2772 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2773 }
2774
2775 for (j = 0; j < nr_ids; j++) {
2776 bool skip_tc = false;
2777
2778 tci = j * num_tc + tc;
2779 if (netif_attr_test_mask(j, mask, nr_ids) &&
2780 netif_attr_test_online(j, online_mask, nr_ids)) {
2781
2782 int pos = 0;
2783
2784 skip_tc = true;
2785
2786 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2787 while ((pos < map->len) && (map->queues[pos] != index))
2788 pos++;
2789
2790 if (pos == map->len)
2791 map->queues[map->len++] = index;
2792#ifdef CONFIG_NUMA
2793 if (type == XPS_CPUS) {
2794 if (numa_node_id == -2)
2795 numa_node_id = cpu_to_node(j);
2796 else if (numa_node_id != cpu_to_node(j))
2797 numa_node_id = -1;
2798 }
2799#endif
2800 }
2801
2802 if (copy)
2803 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2804 skip_tc);
2805 }
2806
2807 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2808
2809
2810 if (!dev_maps)
2811 goto out_no_old_maps;
2812
2813 for (j = 0; j < dev_maps->nr_ids; j++) {
2814 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2815 map = xmap_dereference(dev_maps->attr_map[tci]);
2816 if (!map)
2817 continue;
2818
2819 if (copy) {
2820 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2821 if (map == new_map)
2822 continue;
2823 }
2824
2825 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2826 kfree_rcu(map, rcu);
2827 }
2828 }
2829
2830 old_dev_maps = dev_maps;
2831
2832out_no_old_maps:
2833 dev_maps = new_dev_maps;
2834 active = true;
2835
2836out_no_new_maps:
2837 if (type == XPS_CPUS)
2838
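		/* Record the preferred NUMA node for this tx queue. */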
2839 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2840 (numa_node_id >= 0) ?
2841 numa_node_id : NUMA_NO_NODE);
2842
2843 if (!dev_maps)
2844 goto out_no_maps;
2845
2846
2847 for (j = 0; j < dev_maps->nr_ids; j++) {
2848 tci = j * dev_maps->num_tc;
2849
2850 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2851 if (i == tc &&
2852 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2853 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2854 continue;
2855
2856 active |= remove_xps_queue(dev_maps,
2857 copy ? old_dev_maps : NULL,
2858 tci, index);
2859 }
2860 }
2861
2862 if (old_dev_maps)
2863 kfree_rcu(old_dev_maps, rcu);
2864
2865
2866 if (!active)
2867 reset_xps_maps(dev, dev_maps, type);
2868
2869out_no_maps:
2870 mutex_unlock(&xps_map_mutex);
2871
2872 return 0;
2873error:
2874
2875 for (j = 0; j < nr_ids; j++) {
2876 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2877 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2878 map = copy ?
2879 xmap_dereference(dev_maps->attr_map[tci]) :
2880 NULL;
2881 if (new_map && new_map != map)
2882 kfree(new_map);
2883 }
2884 }
2885
2886 mutex_unlock(&xps_map_mutex);
2887
2888 kfree(new_dev_maps);
2889 return -ENOMEM;
2890}
2891EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2892
2893int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2894 u16 index)
2895{
2896 int ret;
2897
2898 cpus_read_lock();
2899 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2900 cpus_read_unlock();
2901
2902 return ret;
2903}
2904EXPORT_SYMBOL(netif_set_xps_queue);
2905
2906#endif
2907static void netdev_unbind_all_sb_channels(struct net_device *dev)
2908{
2909 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2910
2911
2912 while (txq-- != &dev->_tx[0]) {
2913 if (txq->sb_dev)
2914 netdev_unbind_sb_channel(dev, txq->sb_dev);
2915 }
2916}
2917
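/* netdev_reset_tc - clear all traffic-class state configured on @dev,
 * along with any XPS maps and subordinate channel bindings.
 */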
2918void netdev_reset_tc(struct net_device *dev)
2919{
2920#ifdef CONFIG_XPS
2921 netif_reset_xps_queues_gt(dev, 0);
2922#endif
2923 netdev_unbind_all_sb_channels(dev);
2924
2925
2926 dev->num_tc = 0;
2927 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2928 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2929}
2930EXPORT_SYMBOL(netdev_reset_tc);
2931
2932int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2933{
2934 if (tc >= dev->num_tc)
2935 return -EINVAL;
2936
2937#ifdef CONFIG_XPS
2938 netif_reset_xps_queues(dev, offset, count);
2939#endif
2940 dev->tc_to_txq[tc].count = count;
2941 dev->tc_to_txq[tc].offset = offset;
2942 return 0;
2943}
2944EXPORT_SYMBOL(netdev_set_tc_queue);
2945
2946int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2947{
2948 if (num_tc > TC_MAX_QUEUE)
2949 return -EINVAL;
2950
2951#ifdef CONFIG_XPS
2952 netif_reset_xps_queues_gt(dev, 0);
2953#endif
2954 netdev_unbind_all_sb_channels(dev);
2955
2956 dev->num_tc = num_tc;
2957 return 0;
2958}
2959EXPORT_SYMBOL(netdev_set_num_tc);
2960
2961void netdev_unbind_sb_channel(struct net_device *dev,
2962 struct net_device *sb_dev)
2963{
2964 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2965
2966#ifdef CONFIG_XPS
2967 netif_reset_xps_queues_gt(sb_dev, 0);
2968#endif
2969 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2970 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2971
2972 while (txq-- != &dev->_tx[0]) {
2973 if (txq->sb_dev == sb_dev)
2974 txq->sb_dev = NULL;
2975 }
2976}
2977EXPORT_SYMBOL(netdev_unbind_sb_channel);
2978
2979int netdev_bind_sb_channel_queue(struct net_device *dev,
2980 struct net_device *sb_dev,
2981 u8 tc, u16 count, u16 offset)
2982{
2983
2984 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2985 return -EINVAL;
2986
2987
2988 if ((offset + count) > dev->real_num_tx_queues)
2989 return -EINVAL;
2990
2991
2992 sb_dev->tc_to_txq[tc].count = count;
2993 sb_dev->tc_to_txq[tc].offset = offset;
2994
2995
2996
2997
2998 while (count--)
2999 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
3000
3001 return 0;
3002}
3003EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
3004
3005int netdev_set_sb_channel(struct net_device *dev, u16 channel)
3006{
3007
3008 if (netif_is_multiqueue(dev))
3009 return -ENODEV;
3010
3011
3012
3013
3014
3015
3016 if (channel > S16_MAX)
3017 return -EINVAL;
3018
3019 dev->num_tc = -channel;
3020
3021 return 0;
3022}
3023EXPORT_SYMBOL(netdev_set_sb_channel);
3024
3025
3026
3027
3028
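/* netif_set_real_num_tx_queues - set the actual number of tx queues used.
 * Must be called under rtnl_lock() once the device is registered; when the
 * count shrinks, qdisc and XPS state for the removed queues is reset.
 */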
3029int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
3030{
3031 bool disabling;
3032 int rc;
3033
3034 disabling = txq < dev->real_num_tx_queues;
3035
3036 if (txq < 1 || txq > dev->num_tx_queues)
3037 return -EINVAL;
3038
3039 if (dev->reg_state == NETREG_REGISTERED ||
3040 dev->reg_state == NETREG_UNREGISTERING) {
3041 ASSERT_RTNL();
3042
3043 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
3044 txq);
3045 if (rc)
3046 return rc;
3047
3048 if (dev->num_tc)
3049 netif_setup_tc(dev, txq);
3050
3051 dev->real_num_tx_queues = txq;
3052
3053 if (disabling) {
3054 synchronize_net();
3055 qdisc_reset_all_tx_gt(dev, txq);
3056#ifdef CONFIG_XPS
3057 netif_reset_xps_queues_gt(dev, txq);
3058#endif
3059 }
3060 } else {
3061 dev->real_num_tx_queues = txq;
3062 }
3063
3064 return 0;
3065}
3066EXPORT_SYMBOL(netif_set_real_num_tx_queues);
3067
3068#ifdef CONFIG_SYSFS
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
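/* netif_set_real_num_rx_queues - set the actual number of rx queues used.
 * Must be called under rtnl_lock() once the device is registered so the
 * rx queue kobjects can be updated.
 */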
3079int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
3080{
3081 int rc;
3082
3083 if (rxq < 1 || rxq > dev->num_rx_queues)
3084 return -EINVAL;
3085
3086 if (dev->reg_state == NETREG_REGISTERED) {
3087 ASSERT_RTNL();
3088
3089 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3090 rxq);
3091 if (rc)
3092 return rc;
3093 }
3094
3095 dev->real_num_rx_queues = rxq;
3096 return 0;
3097}
3098EXPORT_SYMBOL(netif_set_real_num_rx_queues);
3099#endif
3100
3101
3102
3103
3104
3105
3106
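/* Default number of RSS queues: bounded by the number of online CPUs,
 * except under kdump where a single queue keeps memory usage minimal.
 */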
3107int netif_get_num_default_rss_queues(void)
3108{
3109 return is_kdump_kernel() ?
3110 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
3111}
3112EXPORT_SYMBOL(netif_get_num_default_rss_queues);
3113
3114static void __netif_reschedule(struct Qdisc *q)
3115{
3116 struct softnet_data *sd;
3117 unsigned long flags;
3118
3119 local_irq_save(flags);
3120 sd = this_cpu_ptr(&softnet_data);
3121 q->next_sched = NULL;
3122 *sd->output_queue_tailp = q;
3123 sd->output_queue_tailp = &q->next_sched;
3124 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3125 local_irq_restore(flags);
3126}
3127
3128void __netif_schedule(struct Qdisc *q)
3129{
3130 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3131 __netif_reschedule(q);
3132}
3133EXPORT_SYMBOL(__netif_schedule);
3134
3135struct dev_kfree_skb_cb {
3136 enum skb_free_reason reason;
3137};
3138
3139static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3140{
3141 return (struct dev_kfree_skb_cb *)skb->cb;
3142}
3143
3144void netif_schedule_queue(struct netdev_queue *txq)
3145{
3146 rcu_read_lock();
3147 if (!netif_xmit_stopped(txq)) {
3148 struct Qdisc *q = rcu_dereference(txq->qdisc);
3149
3150 __netif_schedule(q);
3151 }
3152 rcu_read_unlock();
3153}
3154EXPORT_SYMBOL(netif_schedule_queue);
3155
3156void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3157{
3158 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3159 struct Qdisc *q;
3160
3161 rcu_read_lock();
3162 q = rcu_dereference(dev_queue->qdisc);
3163 __netif_schedule(q);
3164 rcu_read_unlock();
3165 }
3166}
3167EXPORT_SYMBOL(netif_tx_wake_queue);
3168
3169void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
3170{
3171 unsigned long flags;
3172
3173 if (unlikely(!skb))
3174 return;
3175
3176 if (likely(refcount_read(&skb->users) == 1)) {
3177 smp_rmb();
3178 refcount_set(&skb->users, 0);
3179 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3180 return;
3181 }
3182 get_kfree_skb_cb(skb)->reason = reason;
3183 local_irq_save(flags);
3184 skb->next = __this_cpu_read(softnet_data.completion_queue);
3185 __this_cpu_write(softnet_data.completion_queue, skb);
3186 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3187 local_irq_restore(flags);
3188}
3189EXPORT_SYMBOL(__dev_kfree_skb_irq);
3190
3191void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
3192{
3193 if (in_irq() || irqs_disabled())
3194 __dev_kfree_skb_irq(skb, reason);
3195 else
3196 dev_kfree_skb(skb);
3197}
3198EXPORT_SYMBOL(__dev_kfree_skb_any);
3199
3200
3201
3202
3203
3204
3205
3206
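/* netif_device_detach - mark the device as removed and stop all of its tx
 * queues.  Typically called by drivers on suspend or surprise removal.
 */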
3207void netif_device_detach(struct net_device *dev)
3208{
3209 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3210 netif_running(dev)) {
3211 netif_tx_stop_all_queues(dev);
3212 }
3213}
3214EXPORT_SYMBOL(netif_device_detach);
3215
3216
3217
3218
3219
3220
3221
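/* netif_device_attach - mark the device as present again, wake its tx
 * queues and re-arm the watchdog.  Counterpart of netif_device_detach().
 */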
3222void netif_device_attach(struct net_device *dev)
3223{
3224 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3225 netif_running(dev)) {
3226 netif_tx_wake_all_queues(dev);
3227 __netdev_watchdog_up(dev);
3228 }
3229}
3230EXPORT_SYMBOL(netif_device_attach);
3231
3232
3233
3234
3235
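/* Hash an skb onto one of the device's real tx queues, honouring any
 * traffic-class offset/count and a previously recorded rx queue.
 */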
3236static u16 skb_tx_hash(const struct net_device *dev,
3237 const struct net_device *sb_dev,
3238 struct sk_buff *skb)
3239{
3240 u32 hash;
3241 u16 qoffset = 0;
3242 u16 qcount = dev->real_num_tx_queues;
3243
3244 if (dev->num_tc) {
3245 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3246
3247 qoffset = sb_dev->tc_to_txq[tc].offset;
3248 qcount = sb_dev->tc_to_txq[tc].count;
3249 }
3250
3251 if (skb_rx_queue_recorded(skb)) {
3252 hash = skb_get_rx_queue(skb);
3253 if (hash >= qoffset)
3254 hash -= qoffset;
3255 while (unlikely(hash >= qcount))
3256 hash -= qcount;
3257 return hash + qoffset;
3258 }
3259
3260 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3261}
3262
3263static void skb_warn_bad_offload(const struct sk_buff *skb)
3264{
3265 static const netdev_features_t null_features;
3266 struct net_device *dev = skb->dev;
3267 const char *name = "";
3268
3269 if (!net_ratelimit())
3270 return;
3271
3272 if (dev) {
3273 if (dev->dev.parent)
3274 name = dev_driver_string(dev->dev.parent);
3275 else
3276 name = netdev_name(dev);
3277 }
3278 skb_dump(KERN_WARNING, skb, false);
3279 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3280 name, dev ? &dev->features : &null_features,
3281 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3282}
3283
3284
3285
3286
3287
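/* skb_checksum_help - compute the checksum in software and store it at
 * the checksum start + skb->csum_offset, for devices that cannot
 * checksum this packet themselves.
 */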
3288int skb_checksum_help(struct sk_buff *skb)
3289{
3290 __wsum csum;
3291 int ret = 0, offset;
3292
3293 if (skb->ip_summed == CHECKSUM_COMPLETE)
3294 goto out_set_summed;
3295
3296 if (unlikely(skb_is_gso(skb))) {
3297 skb_warn_bad_offload(skb);
3298 return -EINVAL;
3299 }
3300
3301
3302
3303
3304 if (skb_has_shared_frag(skb)) {
3305 ret = __skb_linearize(skb);
3306 if (ret)
3307 goto out;
3308 }
3309
3310 offset = skb_checksum_start_offset(skb);
3311 BUG_ON(offset >= skb_headlen(skb));
3312 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3313
3314 offset += skb->csum_offset;
3315 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3316
3317 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3318 if (ret)
3319 goto out;
3320
3321 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3322out_set_summed:
3323 skb->ip_summed = CHECKSUM_NONE;
3324out:
3325 return ret;
3326}
3327EXPORT_SYMBOL(skb_checksum_help);
3328
3329int skb_crc32c_csum_help(struct sk_buff *skb)
3330{
3331 __le32 crc32c_csum;
3332 int ret = 0, offset, start;
3333
3334 if (skb->ip_summed != CHECKSUM_PARTIAL)
3335 goto out;
3336
3337 if (unlikely(skb_is_gso(skb)))
3338 goto out;
3339
3340
3341
3342
3343 if (unlikely(skb_has_shared_frag(skb))) {
3344 ret = __skb_linearize(skb);
3345 if (ret)
3346 goto out;
3347 }
3348 start = skb_checksum_start_offset(skb);
3349 offset = start + offsetof(struct sctphdr, checksum);
3350 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3351 ret = -EINVAL;
3352 goto out;
3353 }
3354
3355 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3356 if (ret)
3357 goto out;
3358
3359 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3360 skb->len - start, ~(__u32)0,
3361 crc32c_csum_stub));
3362 *(__le32 *)(skb->data + offset) = crc32c_csum;
3363 skb->ip_summed = CHECKSUM_NONE;
3364 skb->csum_not_inet = 0;
3365out:
3366 return ret;
3367}
3368
3369__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3370{
3371 __be16 type = skb->protocol;
3372
3373
3374 if (type == htons(ETH_P_TEB)) {
3375 struct ethhdr *eth;
3376
3377 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3378 return 0;
3379
3380 eth = (struct ethhdr *)skb->data;
3381 type = eth->h_proto;
3382 }
3383
3384 return __vlan_get_protocol(skb, type, depth);
3385}
3386
3387
3388
3389
3390
3391
3392struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3393 netdev_features_t features)
3394{
3395 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3396 struct packet_offload *ptype;
3397 int vlan_depth = skb->mac_len;
3398 __be16 type = skb_network_protocol(skb, &vlan_depth);
3399
3400 if (unlikely(!type))
3401 return ERR_PTR(-EINVAL);
3402
3403 __skb_pull(skb, vlan_depth);
3404
3405 rcu_read_lock();
3406 list_for_each_entry_rcu(ptype, &offload_base, list) {
3407 if (ptype->type == type && ptype->callbacks.gso_segment) {
3408 segs = ptype->callbacks.gso_segment(skb, features);
3409 break;
3410 }
3411 }
3412 rcu_read_unlock();
3413
3414 __skb_push(skb, skb->data - skb_mac_header(skb));
3415
3416 return segs;
3417}
3418EXPORT_SYMBOL(skb_mac_gso_segment);
3419
3420
3421
3422
3423static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3424{
3425 if (tx_path)
3426 return skb->ip_summed != CHECKSUM_PARTIAL &&
3427 skb->ip_summed != CHECKSUM_UNNECESSARY;
3428
3429 return skb->ip_summed == CHECKSUM_NONE;
3430}
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
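/* __skb_gso_segment - split a GSO skb into a list of segments via the
 * protocol's gso_segment callback.  @tx_path distinguishes transmit from
 * receive use.  Returns the segment list or an ERR_PTR(); may return the
 * original skb when no segmentation was needed.
 */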
3445struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3446 netdev_features_t features, bool tx_path)
3447{
3448 struct sk_buff *segs;
3449
3450 if (unlikely(skb_needs_check(skb, tx_path))) {
3451 int err;
3452
3453
3454 err = skb_cow_head(skb, 0);
3455 if (err < 0)
3456 return ERR_PTR(err);
3457 }
3458
3459
3460
3461
3462
3463 if (features & NETIF_F_GSO_PARTIAL) {
3464 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3465 struct net_device *dev = skb->dev;
3466
3467 partial_features |= dev->features & dev->gso_partial_features;
3468 if (!skb_gso_ok(skb, features | partial_features))
3469 features &= ~NETIF_F_GSO_PARTIAL;
3470 }
3471
3472 BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
3473 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3474
3475 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3476 SKB_GSO_CB(skb)->encap_level = 0;
3477
3478 skb_reset_mac_header(skb);
3479 skb_reset_mac_len(skb);
3480
3481 segs = skb_mac_gso_segment(skb, features);
3482
3483 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3484 skb_warn_bad_offload(skb);
3485
3486 return segs;
3487}
3488EXPORT_SYMBOL(__skb_gso_segment);
3489
3490
3491#ifdef CONFIG_BUG
3492static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3493{
3494 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3495 skb_dump(KERN_ERR, skb, true);
3496 dump_stack();
3497}
3498
3499void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3500{
3501 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3502}
3503EXPORT_SYMBOL(netdev_rx_csum_fault);
3504#endif
3505
3506
3507static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3508{
3509#ifdef CONFIG_HIGHMEM
3510 int i;
3511
3512 if (!(dev->features & NETIF_F_HIGHDMA)) {
3513 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3514 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3515
3516 if (PageHighMem(skb_frag_page(frag)))
3517 return 1;
3518 }
3519 }
3520#endif
3521 return 0;
3522}
3523
3524
3525
3526
3527#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3528static netdev_features_t net_mpls_features(struct sk_buff *skb,
3529 netdev_features_t features,
3530 __be16 type)
3531{
3532 if (eth_p_mpls(type))
3533 features &= skb->dev->mpls_features;
3534
3535 return features;
3536}
3537#else
3538static netdev_features_t net_mpls_features(struct sk_buff *skb,
3539 netdev_features_t features,
3540 __be16 type)
3541{
3542 return features;
3543}
3544#endif
3545
3546static netdev_features_t harmonize_features(struct sk_buff *skb,
3547 netdev_features_t features)
3548{
3549 __be16 type;
3550
3551 type = skb_network_protocol(skb, NULL);
3552 features = net_mpls_features(skb, features, type);
3553
3554 if (skb->ip_summed != CHECKSUM_NONE &&
3555 !can_checksum_protocol(features, type)) {
3556 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3557 }
3558 if (illegal_highdma(skb->dev, skb))
3559 features &= ~NETIF_F_SG;
3560
3561 return features;
3562}
3563
3564netdev_features_t passthru_features_check(struct sk_buff *skb,
3565 struct net_device *dev,
3566 netdev_features_t features)
3567{
3568 return features;
3569}
3570EXPORT_SYMBOL(passthru_features_check);
3571
3572static netdev_features_t dflt_features_check(struct sk_buff *skb,
3573 struct net_device *dev,
3574 netdev_features_t features)
3575{
3576 return vlan_features_check(skb, features);
3577}
3578
3579static netdev_features_t gso_features_check(const struct sk_buff *skb,
3580 struct net_device *dev,
3581 netdev_features_t features)
3582{
3583 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3584
3585 if (gso_segs > dev->gso_max_segs)
3586 return features & ~NETIF_F_GSO_MASK;
3587
3588 if (!skb_shinfo(skb)->gso_type) {
3589 skb_warn_bad_offload(skb);
3590 return features & ~NETIF_F_GSO_MASK;
3591 }
3592
3593
3594
3595
3596
3597
3598
3599 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3600 features &= ~dev->gso_partial_features;
3601
3602
3603
3604
3605 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3606 struct iphdr *iph = skb->encapsulation ?
3607 inner_ip_hdr(skb) : ip_hdr(skb);
3608
3609 if (!(iph->frag_off & htons(IP_DF)))
3610 features &= ~NETIF_F_TSO_MANGLEID;
3611 }
3612
3613 return features;
3614}
3615
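/* netif_skb_features - compute the feature set usable for this particular
 * skb, starting from the device features and masking off anything the
 * packet (GSO, encapsulation, VLAN) or the driver cannot handle.
 */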
3616netdev_features_t netif_skb_features(struct sk_buff *skb)
3617{
3618 struct net_device *dev = skb->dev;
3619 netdev_features_t features = dev->features;
3620
3621 if (skb_is_gso(skb))
3622 features = gso_features_check(skb, dev, features);
3623
3624
3625
3626
3627
3628 if (skb->encapsulation)
3629 features &= dev->hw_enc_features;
3630
3631 if (skb_vlan_tagged(skb))
3632 features = netdev_intersect_features(features,
3633 dev->vlan_features |
3634 NETIF_F_HW_VLAN_CTAG_TX |
3635 NETIF_F_HW_VLAN_STAG_TX);
3636
3637 if (dev->netdev_ops->ndo_features_check)
3638 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3639 features);
3640 else
3641 features &= dflt_features_check(skb, dev, features);
3642
3643 return harmonize_features(skb, features);
3644}
3645EXPORT_SYMBOL(netif_skb_features);
3646
3647static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3648 struct netdev_queue *txq, bool more)
3649{
3650 unsigned int len;
3651 int rc;
3652
3653 if (dev_nit_active(dev))
3654 dev_queue_xmit_nit(skb, dev);
3655
3656 len = skb->len;
3657 PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
3658 trace_net_dev_start_xmit(skb, dev);
3659 rc = netdev_start_xmit(skb, dev, txq, more);
3660 trace_net_dev_xmit(skb, rc, dev, len);
3661
3662 return rc;
3663}
3664
3665struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3666 struct netdev_queue *txq, int *ret)
3667{
3668 struct sk_buff *skb = first;
3669 int rc = NETDEV_TX_OK;
3670
3671 while (skb) {
3672 struct sk_buff *next = skb->next;
3673
3674 skb_mark_not_on_list(skb);
3675 rc = xmit_one(skb, dev, txq, next != NULL);
3676 if (unlikely(!dev_xmit_complete(rc))) {
3677 skb->next = next;
3678 goto out;
3679 }
3680
3681 skb = next;
3682 if (netif_tx_queue_stopped(txq) && skb) {
3683 rc = NETDEV_TX_BUSY;
3684 break;
3685 }
3686 }
3687
3688out:
3689 *ret = rc;
3690 return skb;
3691}
3692
3693static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3694 netdev_features_t features)
3695{
3696 if (skb_vlan_tag_present(skb) &&
3697 !vlan_hw_offload_capable(features, skb->vlan_proto))
3698 skb = __vlan_hwaccel_push_inside(skb);
3699 return skb;
3700}
3701
3702int skb_csum_hwoffload_help(struct sk_buff *skb,
3703 const netdev_features_t features)
3704{
3705 if (unlikely(skb_csum_is_sctp(skb)))
3706 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3707 skb_crc32c_csum_help(skb);
3708
3709 if (features & NETIF_F_HW_CSUM)
3710 return 0;
3711
3712 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3713 switch (skb->csum_offset) {
3714 case offsetof(struct tcphdr, check):
3715 case offsetof(struct udphdr, check):
3716 return 0;
3717 }
3718 }
3719
3720 return skb_checksum_help(skb);
3721}
3722EXPORT_SYMBOL(skb_csum_hwoffload_help);
3723
3724static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3725{
3726 netdev_features_t features;
3727
3728 features = netif_skb_features(skb);
3729 skb = validate_xmit_vlan(skb, features);
3730 if (unlikely(!skb))
3731 goto out_null;
3732
3733 skb = sk_validate_xmit_skb(skb, dev);
3734 if (unlikely(!skb))
3735 goto out_null;
3736
3737 if (netif_needs_gso(skb, features)) {
3738 struct sk_buff *segs;
3739
3740 segs = skb_gso_segment(skb, features);
3741 if (IS_ERR(segs)) {
3742 goto out_kfree_skb;
3743 } else if (segs) {
3744 consume_skb(skb);
3745 skb = segs;
3746 }
3747 } else {
3748 if (skb_needs_linearize(skb, features) &&
3749 __skb_linearize(skb))
3750 goto out_kfree_skb;
3751
3752
3753
3754
3755
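		/* The packet still needs its checksum; if the device cannot
		 * offload it for this protocol, finish it in software here.
		 */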
3756 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3757 if (skb->encapsulation)
3758 skb_set_inner_transport_header(skb,
3759 skb_checksum_start_offset(skb));
3760 else
3761 skb_set_transport_header(skb,
3762 skb_checksum_start_offset(skb));
3763 if (skb_csum_hwoffload_help(skb, features))
3764 goto out_kfree_skb;
3765 }
3766 }
3767
3768 skb = validate_xmit_xfrm(skb, features, again);
3769
3770 return skb;
3771
3772out_kfree_skb:
3773 kfree_skb(skb);
3774out_null:
3775 atomic_long_inc(&dev->tx_dropped);
3776 return NULL;
3777}
3778
3779struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3780{
3781 struct sk_buff *next, *head = NULL, *tail;
3782
3783 for (; skb != NULL; skb = next) {
3784 next = skb->next;
3785 skb_mark_not_on_list(skb);
3786
3787
3788 skb->prev = skb;
3789
3790 skb = validate_xmit_skb(skb, dev, again);
3791 if (!skb)
3792 continue;
3793
3794 if (!head)
3795 head = skb;
3796 else
3797 tail->next = skb;
3798
3799
3800
3801 tail = skb->prev;
3802 }
3803 return head;
3804}
3805EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3806
3807static void qdisc_pkt_len_init(struct sk_buff *skb)
3808{
3809 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3810
3811 qdisc_skb_cb(skb)->pkt_len = skb->len;
3812
3813
3814
3815
3816 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3817 unsigned int hdr_len;
3818 u16 gso_segs = shinfo->gso_segs;
3819
3820
3821 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3822
3823
3824 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3825 const struct tcphdr *th;
3826 struct tcphdr _tcphdr;
3827
3828 th = skb_header_pointer(skb, skb_transport_offset(skb),
3829 sizeof(_tcphdr), &_tcphdr);
3830 if (likely(th))
3831 hdr_len += __tcp_hdrlen(th);
3832 } else {
3833 struct udphdr _udphdr;
3834
3835 if (skb_header_pointer(skb, skb_transport_offset(skb),
3836 sizeof(_udphdr), &_udphdr))
3837 hdr_len += sizeof(struct udphdr);
3838 }
3839
3840 if (shinfo->gso_type & SKB_GSO_DODGY)
3841 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3842 shinfo->gso_size);
3843
3844 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3845 }
3846}
3847
3848static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3849 struct sk_buff **to_free,
3850 struct netdev_queue *txq)
3851{
3852 int rc;
3853
3854 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3855 if (rc == NET_XMIT_SUCCESS)
3856 trace_qdisc_enqueue(q, txq, skb);
3857 return rc;
3858}
3859
3860static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3861 struct net_device *dev,
3862 struct netdev_queue *txq)
3863{
3864 spinlock_t *root_lock = qdisc_lock(q);
3865 struct sk_buff *to_free = NULL;
3866 bool contended;
3867 int rc;
3868
3869 qdisc_calculate_pkt_len(skb, q);
3870
3871 if (q->flags & TCQ_F_NOLOCK) {
3872 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3873 qdisc_run_begin(q)) {
3874
3875
3876
3877 if (unlikely(!nolock_qdisc_is_empty(q))) {
3878 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3879 __qdisc_run(q);
3880 qdisc_run_end(q);
3881
3882 goto no_lock_out;
3883 }
3884
3885 qdisc_bstats_cpu_update(q, skb);
3886 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3887 !nolock_qdisc_is_empty(q))
3888 __qdisc_run(q);
3889
3890 qdisc_run_end(q);
3891 return NET_XMIT_SUCCESS;
3892 }
3893
3894 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3895 qdisc_run(q);
3896
3897no_lock_out:
3898 if (unlikely(to_free))
3899 kfree_skb_list(to_free);
3900 return rc;
3901 }
3902
3903
3904
3905
3906
3907
3908
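	/* A contended enqueuer first serializes on the qdisc busylock so
	 * that the current qdisc owner can take the root lock (and keep
	 * dispatching packets) with less contention.
	 */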
3909 contended = qdisc_is_running(q);
3910 if (unlikely(contended))
3911 spin_lock(&q->busylock);
3912
3913 spin_lock(root_lock);
3914 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3915 __qdisc_drop(skb, &to_free);
3916 rc = NET_XMIT_DROP;
3917 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3918 qdisc_run_begin(q)) {
3919
3920
3921
3922
3923
3924
3925 qdisc_bstats_update(q, skb);
3926
3927 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3928 if (unlikely(contended)) {
3929 spin_unlock(&q->busylock);
3930 contended = false;
3931 }
3932 __qdisc_run(q);
3933 }
3934
3935 qdisc_run_end(q);
3936 rc = NET_XMIT_SUCCESS;
3937 } else {
3938 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3939 if (qdisc_run_begin(q)) {
3940 if (unlikely(contended)) {
3941 spin_unlock(&q->busylock);
3942 contended = false;
3943 }
3944 __qdisc_run(q);
3945 qdisc_run_end(q);
3946 }
3947 }
3948 spin_unlock(root_lock);
3949 if (unlikely(to_free))
3950 kfree_skb_list(to_free);
3951 if (unlikely(contended))
3952 spin_unlock(&q->busylock);
3953 return rc;
3954}
3955
3956#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3957static void skb_update_prio(struct sk_buff *skb)
3958{
3959 const struct netprio_map *map;
3960 const struct sock *sk;
3961 unsigned int prioidx;
3962
3963 if (skb->priority)
3964 return;
3965 map = rcu_dereference_bh(skb->dev->priomap);
3966 if (!map)
3967 return;
3968 sk = skb_to_full_sk(skb);
3969 if (!sk)
3970 return;
3971
3972 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3973
3974 if (prioidx < map->priomap_len)
3975 skb->priority = map->priomap[prioidx];
3976}
3977#else
3978#define skb_update_prio(skb)
3979#endif
3980
3981
3982
3983
3984
3985
3986
3987int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3988{
3989 skb_reset_mac_header(skb);
3990 __skb_pull(skb, skb_network_offset(skb));
3991 skb->pkt_type = PACKET_LOOPBACK;
3992 skb->ip_summed = CHECKSUM_UNNECESSARY;
3993 WARN_ON(!skb_dst(skb));
3994 skb_dst_force(skb);
3995 netif_rx_ni(skb);
3996 return 0;
3997}
3998EXPORT_SYMBOL(dev_loopback_xmit);
3999
4000#ifdef CONFIG_NET_EGRESS
4001static struct sk_buff *
4002sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4003{
4004 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
4005 struct tcf_result cl_res;
4006
4007 if (!miniq)
4008 return skb;
4009
4010
4011 qdisc_skb_cb(skb)->mru = 0;
4012 qdisc_skb_cb(skb)->post_ct = false;
4013 mini_qdisc_bstats_cpu_update(miniq, skb);
4014
4015 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4016 case TC_ACT_OK:
4017 case TC_ACT_RECLASSIFY:
4018 skb->tc_index = TC_H_MIN(cl_res.classid);
4019 break;
4020 case TC_ACT_SHOT:
4021 mini_qdisc_qstats_cpu_drop(miniq);
4022 *ret = NET_XMIT_DROP;
4023 kfree_skb(skb);
4024 return NULL;
4025 case TC_ACT_STOLEN:
4026 case TC_ACT_QUEUED:
4027 case TC_ACT_TRAP:
4028 *ret = NET_XMIT_SUCCESS;
4029 consume_skb(skb);
4030 return NULL;
4031 case TC_ACT_REDIRECT:
4032
4033 skb_do_redirect(skb);
4034 *ret = NET_XMIT_SUCCESS;
4035 return NULL;
4036 default:
4037 break;
4038 }
4039
4040 return skb;
4041}
4042#endif
4043
4044#ifdef CONFIG_XPS
4045static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4046 struct xps_dev_maps *dev_maps, unsigned int tci)
4047{
4048 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4049 struct xps_map *map;
4050 int queue_index = -1;
4051
4052 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4053 return queue_index;
4054
4055 tci *= dev_maps->num_tc;
4056 tci += tc;
4057
4058 map = rcu_dereference(dev_maps->attr_map[tci]);
4059 if (map) {
4060 if (map->len == 1)
4061 queue_index = map->queues[0];
4062 else
4063 queue_index = map->queues[reciprocal_scale(
4064 skb_get_hash(skb), map->len)];
4065 if (unlikely(queue_index >= dev->real_num_tx_queues))
4066 queue_index = -1;
4067 }
4068 return queue_index;
4069}
4070#endif
4071
4072static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4073 struct sk_buff *skb)
4074{
4075#ifdef CONFIG_XPS
4076 struct xps_dev_maps *dev_maps;
4077 struct sock *sk = skb->sk;
4078 int queue_index = -1;
4079
4080 if (!static_key_false(&xps_needed))
4081 return -1;
4082
4083 rcu_read_lock();
4084 if (!static_key_false(&xps_rxqs_needed))
4085 goto get_cpus_map;
4086
4087 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4088 if (dev_maps) {
4089 int tci = sk_rx_queue_get(sk);
4090
4091 if (tci >= 0)
4092 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4093 tci);
4094 }
4095
4096get_cpus_map:
4097 if (queue_index < 0) {
4098 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4099 if (dev_maps) {
4100 unsigned int tci = skb->sender_cpu - 1;
4101
4102 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4103 tci);
4104 }
4105 }
4106 rcu_read_unlock();
4107
4108 return queue_index;
4109#else
4110 return -1;
4111#endif
4112}
4113
4114u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4115 struct net_device *sb_dev)
4116{
4117 return 0;
4118}
4119EXPORT_SYMBOL(dev_pick_tx_zero);
4120
4121u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4122 struct net_device *sb_dev)
4123{
4124 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4125}
4126EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4127
4128u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4129 struct net_device *sb_dev)
4130{
4131 struct sock *sk = skb->sk;
4132 int queue_index = sk_tx_queue_get(sk);
4133
4134 sb_dev = sb_dev ? : dev;
4135
4136 if (queue_index < 0 || skb->ooo_okay ||
4137 queue_index >= dev->real_num_tx_queues) {
4138 int new_index = get_xps_queue(dev, sb_dev, skb);
4139
4140 if (new_index < 0)
4141 new_index = skb_tx_hash(dev, sb_dev, skb);
4142
4143 if (queue_index != new_index && sk &&
4144 sk_fullsock(sk) &&
4145 rcu_access_pointer(sk->sk_dst_cache))
4146 sk_tx_queue_set(sk, new_index);
4147
4148 queue_index = new_index;
4149 }
4150
4151 return queue_index;
4152}
4153EXPORT_SYMBOL(netdev_pick_tx);
4154
4155struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4156 struct sk_buff *skb,
4157 struct net_device *sb_dev)
4158{
4159 int queue_index = 0;
4160
4161#ifdef CONFIG_XPS
4162 u32 sender_cpu = skb->sender_cpu - 1;
4163
4164 if (sender_cpu >= (u32)NR_CPUS)
4165 skb->sender_cpu = raw_smp_processor_id() + 1;
4166#endif
4167
4168 if (dev->real_num_tx_queues != 1) {
4169 const struct net_device_ops *ops = dev->netdev_ops;
4170
4171 if (ops->ndo_select_queue)
4172 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4173 else
4174 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4175
4176 queue_index = netdev_cap_txqueue(dev, queue_index);
4177 }
4178
4179 skb_set_queue_mapping(skb, queue_index);
4180 return netdev_get_tx_queue(dev, queue_index);
4181}
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
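/* __dev_queue_xmit - queue @skb for transmission on its device.
 * When the selected tx queue has a qdisc attached, the skb is enqueued
 * there; otherwise (loopback, tunnels and other queue-less devices) it is
 * transmitted directly, with recursion detection.  Returns a NET_XMIT_*
 * code or a negative errno; the skb is consumed in all cases.
 */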
4209static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4210{
4211 struct net_device *dev = skb->dev;
4212 struct netdev_queue *txq;
4213 struct Qdisc *q;
4214 int rc = -ENOMEM;
4215 bool again = false;
4216
4217 skb_reset_mac_header(skb);
4218
4219 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4220 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4221
4222
4223
4224
4225 rcu_read_lock_bh();
4226
4227 skb_update_prio(skb);
4228
4229 qdisc_pkt_len_init(skb);
4230#ifdef CONFIG_NET_CLS_ACT
4231 skb->tc_at_ingress = 0;
4232# ifdef CONFIG_NET_EGRESS
4233 if (static_branch_unlikely(&egress_needed_key)) {
4234 skb = sch_handle_egress(skb, &rc, dev);
4235 if (!skb)
4236 goto out;
4237 }
4238# endif
4239#endif
4240
4241
4242
4243 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4244 skb_dst_drop(skb);
4245 else
4246 skb_dst_force(skb);
4247
4248 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4249 q = rcu_dereference_bh(txq->qdisc);
4250
4251 trace_net_dev_queue(skb);
4252 if (q->enqueue) {
4253 rc = __dev_xmit_skb(skb, q, dev, txq);
4254 goto out;
4255 }
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
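	/* No qdisc enqueue hook: software devices such as loopback and
	 * tunnels transmit directly, relying on recursion detection to
	 * avoid dead loops.
	 */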
4269 if (dev->flags & IFF_UP) {
4270 int cpu = smp_processor_id();
4271
4272 if (txq->xmit_lock_owner != cpu) {
4273 if (dev_xmit_recursion())
4274 goto recursion_alert;
4275
4276 skb = validate_xmit_skb(skb, dev, &again);
4277 if (!skb)
4278 goto out;
4279
4280 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4281 HARD_TX_LOCK(dev, txq, cpu);
4282
4283 if (!netif_xmit_stopped(txq)) {
4284 dev_xmit_recursion_inc();
4285 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4286 dev_xmit_recursion_dec();
4287 if (dev_xmit_complete(rc)) {
4288 HARD_TX_UNLOCK(dev, txq);
4289 goto out;
4290 }
4291 }
4292 HARD_TX_UNLOCK(dev, txq);
4293 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4294 dev->name);
4295 } else {
4296
4297
4298
4299recursion_alert:
4300 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4301 dev->name);
4302 }
4303 }
4304
4305 rc = -ENETDOWN;
4306 rcu_read_unlock_bh();
4307
4308 atomic_long_inc(&dev->tx_dropped);
4309 kfree_skb_list(skb);
4310 return rc;
4311out:
4312 rcu_read_unlock_bh();
4313 return rc;
4314}
4315
4316int dev_queue_xmit(struct sk_buff *skb)
4317{
4318 return __dev_queue_xmit(skb, NULL);
4319}
4320EXPORT_SYMBOL(dev_queue_xmit);
4321
4322int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
4323{
4324 return __dev_queue_xmit(skb, sb_dev);
4325}
4326EXPORT_SYMBOL(dev_queue_xmit_accel);
4327
4328int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4329{
4330 struct net_device *dev = skb->dev;
4331 struct sk_buff *orig_skb = skb;
4332 struct netdev_queue *txq;
4333 int ret = NETDEV_TX_BUSY;
4334 bool again = false;
4335
4336 if (unlikely(!netif_running(dev) ||
4337 !netif_carrier_ok(dev)))
4338 goto drop;
4339
4340 skb = validate_xmit_skb_list(skb, dev, &again);
4341 if (skb != orig_skb)
4342 goto drop;
4343
4344 skb_set_queue_mapping(skb, queue_id);
4345 txq = skb_get_tx_queue(dev, skb);
4346 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4347
4348 local_bh_disable();
4349
4350 dev_xmit_recursion_inc();
4351 HARD_TX_LOCK(dev, txq, smp_processor_id());
4352 if (!netif_xmit_frozen_or_drv_stopped(txq))
4353 ret = netdev_start_xmit(skb, dev, txq, false);
4354 HARD_TX_UNLOCK(dev, txq);
4355 dev_xmit_recursion_dec();
4356
4357 local_bh_enable();
4358 return ret;
4359drop:
4360 atomic_long_inc(&dev->tx_dropped);
4361 kfree_skb_list(skb);
4362 return NET_XMIT_DROP;
4363}
4364EXPORT_SYMBOL(__dev_direct_xmit);
4365
4366
4367
4368
4369
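/* Receive path tunables (exposed via the net.core.* sysctls). */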
4370int netdev_max_backlog __read_mostly = 1000;
4371EXPORT_SYMBOL(netdev_max_backlog);
4372
4373int netdev_tstamp_prequeue __read_mostly = 1;
4374int netdev_budget __read_mostly = 300;
4375
4376unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4377int weight_p __read_mostly = 64;
4378int dev_weight_rx_bias __read_mostly = 1;
4379int dev_weight_tx_bias __read_mostly = 1;
4380int dev_rx_weight __read_mostly = 64;
4381int dev_tx_weight __read_mostly = 64;
4382
4383int gro_normal_batch __read_mostly = 8;
4384
4385
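/* Schedule NAPI poll for @napi: either wake its dedicated kthread when
 * threaded NAPI is enabled, or add it to this CPU's poll list and raise
 * NET_RX_SOFTIRQ.  Called with interrupts disabled.
 */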
4386static inline void ____napi_schedule(struct softnet_data *sd,
4387 struct napi_struct *napi)
4388{
4389 struct task_struct *thread;
4390
4391 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4392
4393
4394
4395
4396
4397
4398 thread = READ_ONCE(napi->thread);
4399 if (thread) {
4400
4401
4402
4403
4404
4405 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4406 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4407 wake_up_process(thread);
4408 return;
4409 }
4410 }
4411
4412 list_add_tail(&napi->poll_list, &sd->poll_list);
4413 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4414}
4415
4416#ifdef CONFIG_RPS
4417
4418
4419struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
4420EXPORT_SYMBOL(rps_sock_flow_table);
4421u32 rps_cpu_mask __read_mostly;
4422EXPORT_SYMBOL(rps_cpu_mask);
4423
4424struct static_key_false rps_needed __read_mostly;
4425EXPORT_SYMBOL(rps_needed);
4426struct static_key_false rfs_needed __read_mostly;
4427EXPORT_SYMBOL(rfs_needed);
4428
4429static struct rps_dev_flow *
4430set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4431 struct rps_dev_flow *rflow, u16 next_cpu)
4432{
4433 if (next_cpu < nr_cpu_ids) {
4434#ifdef CONFIG_RFS_ACCEL
4435 struct netdev_rx_queue *rxqueue;
4436 struct rps_dev_flow_table *flow_table;
4437 struct rps_dev_flow *old_rflow;
4438 u32 flow_id;
4439 u16 rxq_index;
4440 int rc;
4441
4442
4443 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4444 !(dev->features & NETIF_F_NTUPLE))
4445 goto out;
4446 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4447 if (rxq_index == skb_get_rx_queue(skb))
4448 goto out;
4449
4450 rxqueue = dev->_rx + rxq_index;
4451 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4452 if (!flow_table)
4453 goto out;
4454 flow_id = skb_get_hash(skb) & flow_table->mask;
4455 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4456 rxq_index, flow_id);
4457 if (rc < 0)
4458 goto out;
4459 old_rflow = rflow;
4460 rflow = &flow_table->flows[flow_id];
4461 rflow->filter = rc;
4462 if (old_rflow->filter == rflow->filter)
4463 old_rflow->filter = RPS_NO_FILTER;
4464 out:
4465#endif
4466 rflow->last_qtail =
4467 per_cpu(softnet_data, next_cpu).input_queue_head;
4468 }
4469
4470 rflow->cpu = next_cpu;
4471 return rflow;
4472}
4473
4474
4475
4476
4477
4478
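/* get_rps_cpu - select the target CPU for @skb using the rx queue's RPS
 * map, preferring an RFS flow-table match when one is available.  Returns
 * the CPU id or -1 when RPS does not apply.  Called from the receive path
 * under rcu_read_lock().
 */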
4479static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4480 struct rps_dev_flow **rflowp)
4481{
4482 const struct rps_sock_flow_table *sock_flow_table;
4483 struct netdev_rx_queue *rxqueue = dev->_rx;
4484 struct rps_dev_flow_table *flow_table;
4485 struct rps_map *map;
4486 int cpu = -1;
4487 u32 tcpu;
4488 u32 hash;
4489
4490 if (skb_rx_queue_recorded(skb)) {
4491 u16 index = skb_get_rx_queue(skb);
4492
4493 if (unlikely(index >= dev->real_num_rx_queues)) {
4494 WARN_ONCE(dev->real_num_rx_queues > 1,
4495 "%s received packet on queue %u, but number "
4496 "of RX queues is %u\n",
4497 dev->name, index, dev->real_num_rx_queues);
4498 goto done;
4499 }
4500 rxqueue += index;
4501 }
4502
4503
4504
4505 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4506 map = rcu_dereference(rxqueue->rps_map);
4507 if (!flow_table && !map)
4508 goto done;
4509
4510 skb_reset_network_header(skb);
4511 hash = skb_get_hash(skb);
4512 if (!hash)
4513 goto done;
4514
4515 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4516 if (flow_table && sock_flow_table) {
4517 struct rps_dev_flow *rflow;
4518 u32 next_cpu;
4519 u32 ident;
4520
4521
4522 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4523 if ((ident ^ hash) & ~rps_cpu_mask)
4524 goto try_rps;
4525
4526 next_cpu = ident & rps_cpu_mask;
4527
4528
4529
4530
4531 rflow = &flow_table->flows[hash & flow_table->mask];
4532 tcpu = rflow->cpu;
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545 if (unlikely(tcpu != next_cpu) &&
4546 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4547 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4548 rflow->last_qtail)) >= 0)) {
4549 tcpu = next_cpu;
4550 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4551 }
4552
4553 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4554 *rflowp = rflow;
4555 cpu = tcpu;
4556 goto done;
4557 }
4558 }
4559
4560try_rps:
4561
4562 if (map) {
4563 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4564 if (cpu_online(tcpu)) {
4565 cpu = tcpu;
4566 goto done;
4567 }
4568 }
4569
4570done:
4571 return cpu;
4572}
4573
4574#ifdef CONFIG_RFS_ACCEL
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4588 u32 flow_id, u16 filter_id)
4589{
4590 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4591 struct rps_dev_flow_table *flow_table;
4592 struct rps_dev_flow *rflow;
4593 bool expire = true;
4594 unsigned int cpu;
4595
4596 rcu_read_lock();
4597 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4598 if (flow_table && flow_id <= flow_table->mask) {
4599 rflow = &flow_table->flows[flow_id];
4600 cpu = READ_ONCE(rflow->cpu);
4601 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4602 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4603 rflow->last_qtail) <
4604 (int)(10 * flow_table->mask)))
4605 expire = false;
4606 }
4607 rcu_read_unlock();
4608 return expire;
4609}
4610EXPORT_SYMBOL(rps_may_expire_flow);
4611
4612#endif
4613
4614
4615static void rps_trigger_softirq(void *data)
4616{
4617 struct softnet_data *sd = data;
4618
4619 ____napi_schedule(sd, &sd->backlog);
4620 sd->received_rps++;
4621}
4622
4623#endif
4624
4625
4626
4627
4628
4629
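/* If @sd belongs to a remote CPU, chain it on this CPU's rps_ipi_list so
 * the backlog IPI is sent at the end of softirq processing, and return 1;
 * return 0 when @sd is our own softnet_data.
 */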
4630static int rps_ipi_queued(struct softnet_data *sd)
4631{
4632#ifdef CONFIG_RPS
4633 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4634
4635 if (sd != mysd) {
4636 sd->rps_ipi_next = mysd->rps_ipi_list;
4637 mysd->rps_ipi_list = sd;
4638
4639 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4640 return 1;
4641 }
4642#endif
4643 return 0;
4644}
4645
4646#ifdef CONFIG_NET_FLOW_LIMIT
4647int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4648#endif
4649
4650static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4651{
4652#ifdef CONFIG_NET_FLOW_LIMIT
4653 struct sd_flow_limit *fl;
4654 struct softnet_data *sd;
4655 unsigned int old_flow, new_flow;
4656
4657 if (qlen < (netdev_max_backlog >> 1))
4658 return false;
4659
4660 sd = this_cpu_ptr(&softnet_data);
4661
4662 rcu_read_lock();
4663 fl = rcu_dereference(sd->flow_limit);
4664 if (fl) {
4665 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4666 old_flow = fl->history[fl->history_head];
4667 fl->history[fl->history_head] = new_flow;
4668
4669 fl->history_head++;
4670 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4671
4672 if (likely(fl->buckets[old_flow]))
4673 fl->buckets[old_flow]--;
4674
4675 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4676 fl->count++;
4677 rcu_read_unlock();
4678 return true;
4679 }
4680 }
4681 rcu_read_unlock();
4682#endif
4683 return false;
4684}
4685
4686
4687
4688
4689
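/* enqueue_to_backlog - queue @skb on the per-CPU backlog of @cpu,
 * scheduling backlog NAPI (possibly via IPI) when the queue was empty.
 * Drops the packet when the backlog or flow limits are exceeded.
 */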
4690static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4691 unsigned int *qtail)
4692{
4693 struct softnet_data *sd;
4694 unsigned long flags;
4695 unsigned int qlen;
4696
4697 sd = &per_cpu(softnet_data, cpu);
4698
4699 local_irq_save(flags);
4700
4701 rps_lock(sd);
4702 if (!netif_running(skb->dev))
4703 goto drop;
4704 qlen = skb_queue_len(&sd->input_pkt_queue);
4705 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4706 if (qlen) {
4707enqueue:
4708 __skb_queue_tail(&sd->input_pkt_queue, skb);
4709 input_queue_tail_incr_save(sd, qtail);
4710 rps_unlock(sd);
4711 local_irq_restore(flags);
4712 return NET_RX_SUCCESS;
4713 }
4714
4715
4716
4717
4718 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4719 if (!rps_ipi_queued(sd))
4720 ____napi_schedule(sd, &sd->backlog);
4721 }
4722 goto enqueue;
4723 }
4724
4725drop:
4726 sd->dropped++;
4727 rps_unlock(sd);
4728
4729 local_irq_restore(flags);
4730
4731 atomic_long_inc(&skb->dev->rx_dropped);
4732 kfree_skb(skb);
4733 return NET_RX_DROP;
4734}
4735
4736static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4737{
4738 struct net_device *dev = skb->dev;
4739 struct netdev_rx_queue *rxqueue;
4740
4741 rxqueue = dev->_rx;
4742
4743 if (skb_rx_queue_recorded(skb)) {
4744 u16 index = skb_get_rx_queue(skb);
4745
4746 if (unlikely(index >= dev->real_num_rx_queues)) {
4747 WARN_ONCE(dev->real_num_rx_queues > 1,
4748 "%s received packet on queue %u, but number "
4749 "of RX queues is %u\n",
4750 dev->name, index, dev->real_num_rx_queues);
4751
4752 return rxqueue;
4753 }
4754 rxqueue += index;
4755 }
4756 return rxqueue;
4757}
4758
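/* Run the generic XDP program on an skb that was not handled by a native
 * XDP driver hook: linearize and reserve headroom as needed, build an
 * xdp_buff around the data, run the program and fix the skb up to match
 * any changes the program made.  Returns the XDP verdict.
 */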
4759static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4760 struct xdp_buff *xdp,
4761 struct bpf_prog *xdp_prog)
4762{
4763 void *orig_data, *orig_data_end, *hard_start;
4764 struct netdev_rx_queue *rxqueue;
4765 u32 metalen, act = XDP_DROP;
4766 bool orig_bcast, orig_host;
4767 u32 mac_len, frame_sz;
4768 __be16 orig_eth_type;
4769 struct ethhdr *eth;
4770 int off;
4771
4772
4773
4774
4775 if (skb_is_redirected(skb))
4776 return XDP_PASS;
4777
4778
4779
4780
4781
4782 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4783 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4784 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4785 int troom = skb->tail + skb->data_len - skb->end;
4786
4787
4788
4789
4790 if (pskb_expand_head(skb,
4791 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4792 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4793 goto do_drop;
4794 if (skb_linearize(skb))
4795 goto do_drop;
4796 }
4797
4798
4799
4800
4801 mac_len = skb->data - skb_mac_header(skb);
4802 hard_start = skb->data - skb_headroom(skb);
4803
4804
4805 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4806 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4807
4808 rxqueue = netif_get_rxqueue(skb);
4809 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4810 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4811 skb_headlen(skb) + mac_len, true);
4812
4813 orig_data_end = xdp->data_end;
4814 orig_data = xdp->data;
4815 eth = (struct ethhdr *)xdp->data;
4816 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4817 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4818 orig_eth_type = eth->h_proto;
4819
4820 act = bpf_prog_run_xdp(xdp_prog, xdp);
4821
4822
4823 off = xdp->data - orig_data;
4824 if (off) {
4825 if (off > 0)
4826 __skb_pull(skb, off);
4827 else if (off < 0)
4828 __skb_push(skb, -off);
4829
4830 skb->mac_header += off;
4831 skb_reset_network_header(skb);
4832 }
4833
4834
4835 off = xdp->data_end - orig_data_end;
4836 if (off != 0) {
4837 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4838 skb->len += off;
4839 }
4840
4841
4842 eth = (struct ethhdr *)xdp->data;
4843 if ((orig_eth_type != eth->h_proto) ||
4844 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4845 skb->dev->dev_addr)) ||
4846 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4847 __skb_push(skb, ETH_HLEN);
4848 skb->pkt_type = PACKET_HOST;
4849 skb->protocol = eth_type_trans(skb, skb->dev);
4850 }
4851
4852 switch (act) {
4853 case XDP_REDIRECT:
4854 case XDP_TX:
4855 __skb_push(skb, mac_len);
4856 break;
4857 case XDP_PASS:
4858 metalen = xdp->data - xdp->data_meta;
4859