/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/pkt_sched.h>
#include <net/dst.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	skb_dst_force(skb);
	q->gso_skb = skb;
	q->qstats.requeues++;
	q->q.qlen++;	/* it's still part of the queue */
	__netif_schedule(q);

	return 0;
}

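/* Note: a previously requeued skb (q->gso_skb) is dequeued in preference
 * to the qdisc's own queue, but only once its tx queue is neither stopped
 * nor frozen; otherwise it is left in place and NULL is returned.
 */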
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason of requeuing without tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_stopped(txq) &&
		    !netif_tx_queue_frozen(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
	} else {
		skb = q->dequeue(q);
	}

	return skb;
}

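/* Returns the remaining qdisc length after dropping the skb when this CPU
 * already holds the tx lock (a driver deadloop), or the result of
 * requeueing when another CPU holds it.
 */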
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && (netif_tx_queue_stopped(txq) ||
		    netif_tx_queue_frozen(txq)))
		ret = 0;

	return ret;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 *
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	skb = dequeue_skb(q);
	if (unlikely(!skb))
		return 0;
	WARN_ON_ONCE(skb_dst_is_noref(skb));
	root_lock = qdisc_lock(q);
	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	return sch_direct_xmit(skb, q, dev, txq, root_lock);
}

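/* Drain the qdisc until it is empty or the tx queue throttles, or until
 * the quota runs out; in the latter case transmission is rescheduled to
 * the NET_TX softirq via __netif_schedule() rather than hogging the CPU.
 */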
void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	qdisc_run_end(q);
}

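/* Return the most recent trans_start of any tx queue on this device and
 * cache the result in dev->trans_start, so multiqueue drivers that only
 * update per-queue timestamps still feed the watchdog.
 */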
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long val, res = dev->trans_start;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		val = netdev_get_tx_queue(dev, i)->trans_start;
		if (val && time_after(val, res))
			res = val;
	}
	dev->trans_start = res;
	return res;
}
EXPORT_SYMBOL(dev_trans_start);

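/* Per-device watchdog timer: under the tx lock, check every stopped tx
 * queue for a transmit pending longer than watchdog_timeo and, if one is
 * found, invoke the driver's ndo_tx_timeout(). The timer re-arms itself
 * while the device remains present, running and with carrier.
 */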
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_timedout = 0;
			unsigned int i;
			unsigned long trans_start;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				/*
				 * old device drivers set dev->trans_start
				 */
				trans_start = txq->trans_start ? : dev->trans_start;
				if (netif_tx_queue_stopped(txq) &&
				    time_after(jiffies, (trans_start +
							 dev->watchdog_timeo))) {
					some_queue_timedout = 1;
					break;
				}
			}

			if (some_queue_timedout) {
				char drivername[64];
				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
					  dev->name, netdev_drivername(dev, drivername, 64), i);
				dev->netdev_ops->ndo_tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

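/* Arm the watchdog timer if the driver implements ndo_tx_timeout(),
 * defaulting watchdog_timeo to 5 seconds; a device reference is held for
 * as long as the timer is pending.
 */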
void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->netdev_ops->ndo_tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected that carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netif_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netif_notify_peers);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.qdisc		=	&noop_qdisc,
	.qdisc_sleeping	=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc;
static struct netdev_queue noqueue_netdev_queue = {
	.qdisc		=	&noqueue_qdisc,
	.qdisc_sleeping	=	&noqueue_qdisc,
};

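/* noqueue: .enqueue is NULL, so dev_queue_xmit() bypasses queueing and
 * transmits directly; used for virtual devices with tx_queue_len == 0.
 */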
static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
	.dev_queue	=	&noqueue_netdev_queue,
};

static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 * 	- queues for the three band
 * 	- bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
	u32 bitmap;
	struct sk_buff_head q[PFIFO_FAST_BANDS];
};

/*
 * Convert a bitmap to the first band number where an skb is queued, where:
 * 	bitmap=0 means there are no skbs on any band.
 * 	bitmap=1 means there is an skb on band 0.
 *	bitmap=7 means there are skbs on all 3 bands, etc.
 */
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
					     int band)
{
	return priv->q + band;
}

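/* Tail-drop once the device's tx_queue_len is exceeded; otherwise queue
 * the skb on the band selected by its priority and mark that band's bit
 * in the bitmap so dequeue can find it.
 */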
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
		int band = prio2band[skb->priority & TC_PRIO_MAX];
		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
		struct sk_buff_head *list = band2list(priv, band);

		priv->bitmap |= (1 << band);
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (likely(band >= 0)) {
		struct sk_buff_head *list = band2list(priv, band);
		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);

		qdisc->q.qlen--;
		if (skb_queue_empty(list))
			priv->bitmap &= ~(1 << band);

		return skb;
	}

	return NULL;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int band = bitmap2band[priv->bitmap];

	if (band >= 0) {
		struct sk_buff_head *list = band2list(priv, band);

		return skb_peek(list);
	}

	return NULL;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, band2list(priv, prio));

	priv->bitmap = 0;
	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(band2list(priv, prio));

	return 0;
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};

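/* The Qdisc and its private data are allocated together; the returned
 * pointer is aligned to QDISC_ALIGNTO and sch->padded records the offset
 * back to the start of the allocation for qdisc_rcu_free().
 */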
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	spin_lock_init(&sch->busylock);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);

	if (qdisc->gso_skb) {
		kfree_skb(qdisc->gso_skb);
		qdisc->gso_skb = NULL;
		qdisc->q.qlen = 0;
	}
}
EXPORT_SYMBOL(qdisc_reset);

static void qdisc_rcu_free(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

	kfree((char *) qdisc - qdisc->padded);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	qdisc_list_del(qdisc);

	qdisc_put_stab(qdisc->stab);
#endif
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));

	kfree_skb(qdisc->gso_skb);
	/*
	 * gen_estimator est_timer() might access qdisc->q.lock,
	 * wait a RCU grace period before freeing qdisc.
	 */
	call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
EXPORT_SYMBOL(qdisc_destroy);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev, dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}

		/* Can by-pass the queue discipline for default qdisc */
		qdisc->flags |= TCQ_F_CAN_BYPASS;
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
		atomic_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {
			qdisc->ops->attach(qdisc);
			dev->qdisc = qdisc;
		}
	}
}

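/* Publish the standby (sleeping) qdisc as the active dev_queue->qdisc,
 * clearing the deactivated bit for non-builtin qdiscs; anything other
 * than noqueue also needs the tx watchdog running.
 */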
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
		dev_queue->trans_start = 0;
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	   create default one i.e. pfifo_fast for devices,
	   which need queueing and noqueue_qdisc for
	   virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	transition_one_qdisc(dev, &dev->rx_queue, NULL);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct Qdisc *qdisc;

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		spin_lock_bh(qdisc_lock(qdisc));

		if (!(qdisc->flags & TCQ_F_BUILTIN))
			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);

		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		qdisc_reset(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}
}

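/* Under each queue's root lock, report whether any tx qdisc is still
 * running or scheduled on another CPU; dev_deactivate() spins until this
 * returns false.
 */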
static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc_sleeping;
		root_lock = qdisc_lock(q);

		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (some_qdisc_is_busy(dev))
		yield();
}

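/* At register time every queue starts out with noop_qdisc, so packets are
 * dropped until dev_activate() installs a real discipline.
 */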
static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
}

void dev_init_scheduler(struct net_device *dev)
{
	dev->qdisc = &noop_qdisc;
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
	qdisc_destroy(dev->qdisc);
	dev->qdisc = &noop_qdisc;

	WARN_ON(timer_pending(&dev->watchdog_timer));
}
870