/*
 * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *                      HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *                      found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *                      helped a lot to locate nasty class stall bug
 *              Andi Kleen, Jamal Hadi, Bert Hubert
 *                      code review and helpful comments on shaping
 *              Tomasz Wrona, <tw@eter.tym.pl>
 *                      created test case so that I was able to fix nasty bug
 *              Wilfried Weissmann
 *                      spotted bug in dequeue code and helped with fix
 *              Jiri Fojtasek
 *                      fixed requeue routine
 *              and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
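
/* For illustration only (an editor's sketch, not part of the kernel
 * sources): a small HTB hierarchy as it might be built with the tc(8)
 * userspace tool, assuming an interface named eth0:
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit ceil 1mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * Leaves 1:10 and 1:20 sit at level 0 and may borrow up to their ceil
 * from interior class 1:1; unclassified traffic goes to the default
 * class 1:20.
 */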

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011         /* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
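
/* A sketch of run-time use: because the parameter is writable (mode 0640),
 * hysteresis can be toggled through sysfs, e.g.
 *
 *   cat /sys/module/sch_htb/parameters/htb_hysteresis
 *   echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */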

/* used internally to keep status of single class */
enum htb_cmode {
        HTB_CANT_SEND,          /* class can't send and can't borrow */
        HTB_MAY_BORROW,         /* class can't send but may borrow */
        HTB_CAN_SEND            /* class can send */
};

/* interior & leaf nodes; props specific to leaves are marked L: */
struct htb_class {
        struct Qdisc_class_common common;
        /* general class parameters */
        struct gnet_stats_basic_packed bstats;
        struct gnet_stats_queue qstats;
        struct gnet_stats_rate_est rate_est;
        struct tc_htb_xstats xstats;    /* our special stats */
        int refcnt;             /* usage count of this class */

        /* topology */
        int level;              /* our level (see above) */
        unsigned int children;
        struct htb_class *parent;       /* parent class */

        int prio;               /* these two are used only by leaves... */
        int quantum;            /* but stored for parent-to-leaf return */

        union {
                struct htb_class_leaf {
                        struct Qdisc *q;
                        int deficit[TC_HTB_MAXDEPTH];
                        struct list_head drop_list;
                } leaf;
                struct htb_class_inner {
                        struct rb_root feed[TC_HTB_NUMPRIO];    /* feed trees */
                        struct rb_node *ptr[TC_HTB_NUMPRIO];    /* current class ptr */
                        /* When class changes from state 1->2 and disconnects from
                         * parent's feed then we lose the ptr value and start from
                         * the first child again. Here we store classid of the
                         * last valid ptr (used when ptr is NULL).
                         */
                        u32 last_ptr_id[TC_HTB_NUMPRIO];
                } inner;
        } un;
        struct rb_node node[TC_HTB_NUMPRIO];    /* node for self or feed tree */
        struct rb_node pq_node; /* node for event queue */
        psched_time_t pq_key;

        int prio_activity;      /* for which prios are we active */
        enum htb_cmode cmode;   /* current mode of the class */

        /* class attached filters */
        struct tcf_proto *filter_list;
        int filter_cnt;

        /* token bucket parameters */
        struct qdisc_rate_table *rate;  /* rate table of the class itself */
        struct qdisc_rate_table *ceil;  /* ceiling rate (limits borrows too) */
        long buffer, cbuffer;   /* token bucket depth/rate */
        psched_tdiff_t mbuffer; /* max wait time */
        long tokens, ctokens;   /* current number of tokens */
        psched_time_t t_c;      /* checkpoint time */
};

struct htb_sched {
        struct Qdisc_class_hash clhash;
        struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

        /* self list - roots of self generating tree */
        struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
        int row_mask[TC_HTB_MAXDEPTH];
        struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
        u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];

        /* self wait list - roots of wait PQs per row */
        struct rb_root wait_pq[TC_HTB_MAXDEPTH];

        /* time of nearest event per level (row) */
        psched_time_t near_ev_cache[TC_HTB_MAXDEPTH];

        int defcls;             /* class where unclassified flows go to */

        /* filters for qdisc itself */
        struct tcf_proto *filter_list;

        int rate2quantum;       /* quant = rate / rate2quantum */
        psched_time_t now;      /* cached dequeue time */
        struct qdisc_watchdog watchdog;

        /* non-shaped skbs; let them go directly through */
        struct sk_buff_head direct_queue;
        int direct_qlen;        /* max qlen of above */

        long direct_pkts;

#define HTB_WARN_TOOMANYEVENTS  0x1
        unsigned int warned;    /* only one warning */
        struct work_struct work;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, handle);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct htb_class, common);
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly through. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in skb->priority.
 * Then we examine filters in the qdisc and in inner nodes (if a higher
 * filter points to the inner node). If we end up with classid MAJOR:0 we
 * enqueue the skb into the special internal fifo (direct). These packets
 * then go directly through. If we still have no valid leaf we try to use
 * the MAJOR:default leaf. If that is unsuccessful too, we finish and
 * return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)
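
/* Worked example of the direct-selection path described above, assuming
 * a root qdisc installed as "handle 1:" (sch->handle == 0x00010000):
 * a packet with skb->priority == 0x00010000 (classid 1:0) is returned as
 * HTB_DIRECT, while skb->priority == 0x00010014 (classid 1:20) selects
 * class 1:20 directly, provided that class exists and is a leaf.
 */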

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow selecting the class by setting skb->priority to a valid
         * classid; note that nfmark can be used too by attaching filter
         * fw with no rules in it
         */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;      /* X:0 (direct flow) selected */
        cl = htb_find(skb->priority, sch);
        if (cl && cl->level == 0)
                return cl;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        tcf = q->filter_list;
        while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (void *)res.class;
                if (!cl) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;      /* X:0 (direct flow) */
                        cl = htb_find(res.classid, sch);
                        if (!cl)
                                break;  /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl;      /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = cl->filter_list;
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (!cl || cl->level)
                return HTB_DIRECT;      /* bad default .. this is safe bet */
        return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
                               struct htb_class *cl, int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);

                if (cl->common.classid > c->common.classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to the priority event queue to indicate that the
 * class will change its mode in cl->pq_key microseconds. Make sure that
 * the class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
                                 struct htb_class *cl, long delay)
{
        struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;

        cl->pq_key = q->now + delay;
        if (cl->pq_key == q->now)
                cl->pq_key++;

        /* update the nearest event cache */
        if (q->near_ev_cache[cl->level] > cl->pq_key)
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (cl->pq_key >= c->pq_key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->wait_pq[cl->level]);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                                        struct htb_class *cl, int mask)
{
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
        }
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
        if (RB_EMPTY_NODE(rb)) {
                WARN_ON(1);
        } else {
                rb_erase(rb, root);
                RB_CLEAR_NODE(rb);
        }
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
                                                 struct htb_class *cl, int mask)
{
        int m = 0;

        while (mask) {
                int prio = ffz(~mask);

                mask &= ~(1 << prio);
                if (q->ptr[cl->level][prio] == cl->node + prio)
                        htb_next_rb_node(q->ptr[cl->level] + prio);

                htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
                if (!q->row[cl->level][prio].rb_node)
                        m |= 1 << prio;
        }
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.feed[prio].rb_node)
                                /* parent already has its feed in use, so
                                 * reset the bit in mask as the parent is
                                 * already ok
                                 */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
                }
                p->prio_activity |= mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent the old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. The class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.ptr[prio] == cl->node + prio) {
                                /* we are removing child which is pointed to from
                                 * parent feed - forget the pointer but remember
                                 * classid
                                 */
                                p->un.inner.last_ptr_id[prio] = cl->common.classid;
                                p->un.inner.ptr[prio] = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);

                        if (!p->un.inner.feed[prio].rb_node)
                                mask |= 1 << prio;
                }

                p->prio_activity &= ~mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q, cl, mask);
}

static inline long htb_lowater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
        else
                return 0;
}
static inline long htb_hiwater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
        else
                return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to the time difference
 * from now to the time when cl will change its state.
 * Also it is worth noting that the class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be a hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, long *diff)
{
        long toks;

        if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
                *diff = -toks;
                return HTB_CANT_SEND;
        }

        if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
                return HTB_CAN_SEND;

        *diff = -toks;
        return HTB_MAY_BORROW;
}
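
/* A worked example of the mode computation above, with hysteresis off
 * (htb_lowater() == htb_hiwater() == 0) and made-up token counts:
 *
 *   cl->ctokens + *diff == -5                           -> HTB_CANT_SEND,
 *           *diff set to 5 (ticks until ctokens reach 0 again);
 *   cl->ctokens + *diff == 10, cl->tokens + *diff == 3  -> HTB_CAN_SEND;
 *   cl->ctokens + *diff == 10, cl->tokens + *diff == -7 -> HTB_MAY_BORROW,
 *           *diff set to 7.
 */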

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. The routine will update the feed lists linkage, change
 * the mode and add the class to the wait event queue if appropriate. The
 * new mode should be different from the old one and cl->pq_key has to be
 * valid if changing to a mode other than HTB_CAN_SEND (see
 * htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl, diff);

        if (new_mode == cl->cmode)
                return;

        if (cl->prio_activity) {        /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q, cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q, cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
        WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

        if (!cl->prio_activity) {
                cl->prio_activity = 1 << cl->prio;
                htb_activate_prios(q, cl);
                list_add_tail(&cl->un.leaf.drop_list,
                              q->drops + cl->prio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes the class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
        WARN_ON(!cl->prio_activity);

        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        int uninitialized_var(ret);
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);

        if (cl == HTB_DIRECT) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen) {
                        __skb_queue_tail(&q->direct_queue, skb);
                        q->direct_pkts++;
                } else {
                        return qdisc_drop(skb, sch);
                }
#ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret & __NET_XMIT_BYPASS)
                        sch->qstats.drops++;
                kfree_skb(skb);
                return ret;
#endif
        } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret)) {
                        sch->qstats.drops++;
                        cl->qstats.drops++;
                }
                return ret;
        } else {
                htb_activate(q, cl);
        }

        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, long diff)
{
        long toks = diff + cl->tokens;

        if (toks > cl->buffer)
                toks = cl->buffer;
        toks -= (long) qdisc_l2t(cl->rate, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;

        cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, long diff)
{
        long toks = diff + cl->ctokens;

        if (toks > cl->cbuffer)
                toks = cl->cbuffer;
        toks -= (long) qdisc_l2t(cl->ceil, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;

        cl->ctokens = toks;
}
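
/* A worked example of the accounting above, with made-up numbers: assume
 * cl->buffer == 10000, cl->tokens == 9000, diff == 3000 and a 1500-byte
 * packet whose rate-table cost qdisc_l2t(cl->rate, 1500) is 2000 ticks.
 * Then toks = 3000 + 9000 = 12000 is clamped to the bucket depth 10000,
 * and the packet cost leaves cl->tokens == 8000. The lower clamp to
 * 1 - cl->mbuffer keeps a long overlimit period from building up an
 * unbounded token debt.
 */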

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                             int level, struct sk_buff *skb)
{
        int bytes = qdisc_pkt_len(skb);
        enum htb_cmode old_mode;
        long diff;

        while (cl) {
                diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                if (cl->level >= level) {
                        if (cl->level == level)
                                cl->xstats.lends++;
                        htb_accnt_tokens(cl, bytes, diff);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff;     /* we moved t_c; update tokens */
                }
                htb_accnt_ctokens(cl, bytes, diff);
                cl->t_c = q->now;

                old_mode = cl->cmode;
                diff = 0;
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }

                /* update basic stats except for leaves which are already updated */
                if (cl->level)
                        bstats_update(&cl->bstats, skb);

                cl = cl->parent;
        }
}
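
/* To illustrate the charging rule above with the example hierarchy from
 * the top of this file: when leaf 1:10 sends while borrowing at level 1
 * (from 1:1), the walk charges ceil tokens on both 1:10 and 1:1, but
 * rate tokens only on 1:1; 1:10 records a "borrows" event and 1:1 a
 * "lends" event in their xstats.
 */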

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events that have cl->pq_key <= q->now.
 */
static psched_time_t htb_do_events(struct htb_sched *q, int level,
                                   unsigned long start)
{
        /* don't run for longer than 2 jiffies; 2 is used instead of
         * 1 to simplify things when jiffy is going to be incremented
         * too soon
         */
        unsigned long stop_at = start + 2;
        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
                long diff;
                struct rb_node *p = rb_first(&q->wait_pq[level]);

                if (!p)
                        return 0;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (cl->pq_key > q->now)
                        return cl->pq_key;

                htb_safe_rb_erase(p, q->wait_pq + level);
                diff = psched_tdiff_bounded(q->now, cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree(q, cl, diff);
        }

        /* too much load - let's continue after a break for scheduling */
        if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
                pr_warning("htb: too many events!\n");
                q->warned |= HTB_WARN_TOOMANYEVENTS;
        }

        return q->now;
}

/* Returns class->node+prio from the id-tree where the class's id is
 * >= id, or NULL if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl =
                    rb_entry(n, struct htb_class, node[prio]);

                if (id > cl->common.classid) {
                        n = n->rb_right;
                } else if (id < cl->common.classid) {
                        r = n;
                        n = n->rb_left;
                } else {
                        return n;
                }
        }
        return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf that the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
                                         struct rb_node **pptr, u32 *pid)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;

        BUG_ON(!tree->rb_node);
        sp->root = tree->rb_node;
        sp->pptr = pptr;
        sp->pid = pid;

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                         * the original or next ptr
                         */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;   /* ptr is valid now so remove this hint as it
                                 * can become out of date quickly
                                 */
                if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                if (!*sp->pptr) {
                                        WARN_ON(1);
                                        return NULL;
                                }
                                htb_next_rb_node(sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
                        if (!cl->level)
                                return cl;
                        (++sp)->root = cl->un.inner.feed[prio].rb_node;
                        sp->pptr = cl->un.inner.ptr + prio;
                        sp->pid = cl->un.inner.last_ptr_id + prio;
                }
        }
        WARN_ON(1);
        return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is an active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
                                        int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl, *start;
        /* look initial class up in the row */
        start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                     q->ptr[level] + prio,
                                     q->last_ptr_id[level] + prio);

        do {
next:
                if (unlikely(!cl))
                        return NULL;

                /* class can be empty - it is unlikely but can be true if leaf
                 * qdisc drops packets in enqueue routine or if someone used
                 * graft operation on the leaf since last dequeue;
                 * simply deactivate and skip such class
                 */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf(q->row[level] + prio,
                                               prio, q->ptr[level] + prio,
                                               q->last_ptr_id[level] + prio);

                        if (cl == start)        /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
                if (likely(skb != NULL))
                        break;

                qdisc_warn_nonwc("htb", cl->un.leaf.q);
                htb_next_rb_node((level ? cl->parent->un.inner.ptr
                                        : q->ptr[0]) + prio);
                cl = htb_lookup_leaf(q->row[level] + prio, prio,
                                     q->ptr[level] + prio,
                                     q->last_ptr_id[level] + prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                bstats_update(&cl->bstats, skb);
                cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
                if (cl->un.leaf.deficit[level] < 0) {
                        cl->un.leaf.deficit[level] += cl->quantum;
                        htb_next_rb_node((level ? cl->parent->un.inner.ptr
                                                : q->ptr[0]) + prio);
                }
                /* this used to be after charge_class but this arrangement
                 * gives us slightly better performance
                 */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
        }
        return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        psched_time_t next_event;
        unsigned long start_at;

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __skb_dequeue(&q->direct_queue);
        if (skb != NULL) {
ok:
                qdisc_bstats_update(sch, skb);
                qdisc_unthrottled(sch);
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen)
                goto fin;
        q->now = psched_get_time();
        start_at = jiffies;

        next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;

        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                psched_time_t event;

                if (q->now >= q->near_ev_cache[level]) {
                        event = htb_do_events(q, level, start_at);
                        if (!event)
                                event = q->now + PSCHED_TICKS_PER_SEC;
                        q->near_ev_cache[level] = event;
                } else
                        event = q->near_ev_cache[level];

                if (next_event > event)
                        next_event = event;

                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);

                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
                        if (likely(skb != NULL))
                                goto ok;
                }
        }
        sch->qstats.overlimits++;
        if (likely(next_event > q->now))
                qdisc_watchdog_schedule(&q->watchdog, next_event);
        else
                schedule_work(&q->work);
fin:
        return skb;
}

/* try to drop from each class (by prio) until one succeeds */
static unsigned int htb_drop(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        int prio;

        for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
                struct list_head *p;
                list_for_each(p, q->drops + prio) {
                        struct htb_class *cl = list_entry(p, struct htb_class,
                                                          un.leaf.drop_list);
                        unsigned int len;
                        if (cl->un.leaf.q->ops->drop &&
                            (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
                                sch->q.qlen--;
                                if (!cl->un.leaf.q->q.qlen)
                                        htb_deactivate(q, cl);
                                return len;
                        }
                }
        }
        return 0;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct hlist_node *n;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
                        if (cl->level)
                                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;

                }
        }
        qdisc_watchdog_cancel(&q->watchdog);
        __skb_queue_purge(&q->direct_queue);
        sch->q.qlen = 0;
        memset(q->row, 0, sizeof(q->row));
        memset(q->row_mask, 0, sizeof(q->row_mask));
        memset(q->wait_pq, 0, sizeof(q->wait_pq));
        memset(q->ptr, 0, sizeof(q->ptr));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
        [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
        [TCA_HTB_INIT]  = { .len = sizeof(struct tc_htb_glob) },
        [TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static void htb_work_func(struct work_struct *work)
{
        struct htb_sched *q = container_of(work, struct htb_sched, work);
        struct Qdisc *sch = q->watchdog.qdisc;

        __netif_schedule(qdisc_root(sch));
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_HTB_INIT + 1];
        struct tc_htb_glob *gopt;
        int err;
        int i;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy);
        if (err < 0)
                return err;

        if (tb[TCA_HTB_INIT] == NULL) {
                pr_err("HTB: hey probably you have bad tc tool ?\n");
                return -EINVAL;
        }
        gopt = nla_data(tb[TCA_HTB_INIT]);
        if (gopt->version != HTB_VER >> 16) {
                pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
                       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
                return -EINVAL;
        }

        err = qdisc_class_hash_init(&q->clhash);
        if (err < 0)
                return err;
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);

        qdisc_watchdog_init(&q->watchdog, sch);
        INIT_WORK(&q->work, htb_work_func);
        skb_queue_head_init(&q->direct_queue);

        q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
        if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                q->direct_qlen = 2;

        if ((q->rate2quantum = gopt->rate2quantum) < 1)
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;

        return 0;
}

static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *nest;
        struct tc_htb_glob gopt;

        spin_lock_bh(root_lock);

        gopt.direct_pkts = q->direct_pkts;
        gopt.version = HTB_VER;
        gopt.rate2quantum = q->rate2quantum;
        gopt.defcls = q->defcls;
        gopt.debug = 0;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

        spin_unlock_bh(root_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(root_lock);
        nla_nest_cancel(skb, nest);
        return -1;
}

static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        struct htb_class *cl = (struct htb_class *)arg;
        spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
        struct nlattr *nest;
        struct tc_htb_opt opt;

        spin_lock_bh(root_lock);
        tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        if (!cl->level && cl->un.leaf.q)
                tcm->tcm_info = cl->un.leaf.q->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        memset(&opt, 0, sizeof(opt));

        opt.rate = cl->rate->rate;
        opt.buffer = cl->buffer;
        opt.ceil = cl->ceil->rate;
        opt.cbuffer = cl->cbuffer;
        opt.quantum = cl->quantum;
        opt.prio = cl->prio;
        opt.level = cl->level;
        if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        nla_nest_end(skb, nest);
        spin_unlock_bh(root_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(root_lock);
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (!cl->level && cl->un.leaf.q)
                cl->qstats.qlen = cl->un.leaf.q->q.qlen;
        cl->xstats.tokens = cl->tokens;
        cl->xstats.ctokens = cl->ctokens;

        if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, &cl->qstats) < 0)
                return -1;

        return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl->level)
                return -EINVAL;
        if (new == NULL &&
            (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                     cl->common.classid)) == NULL)
                return -ENOBUFS;

        sch_tree_lock(sch);
        *old = cl->un.leaf.q;
        cl->un.leaf.q = new;
        if (*old != NULL) {
                qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
                qdisc_reset(*old);
        }
        sch_tree_unlock(sch);
        return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;
        return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl->un.leaf.q->q.qlen == 0)
                htb_deactivate(qdisc_priv(sch), cl);
}

static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
        struct htb_class *cl = htb_find(classid, sch);
        if (cl)
                cl->refcnt++;
        return (unsigned long)cl;
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
        if (!cl->parent)
                /* the root class */
                return 0;
        if (cl->parent->children > 1)
                /* not the last child */
                return 0;
        return 1;
}

static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
                               struct Qdisc *new_q)
{
        struct htb_class *parent = cl->parent;

        WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

        if (parent->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);

        parent->level = 0;
        memset(&parent->un.inner, 0, sizeof(parent->un.inner));
        INIT_LIST_HEAD(&parent->un.leaf.drop_list);
        parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
        parent->tokens = parent->buffer;
        parent->ctokens = parent->cbuffer;
        parent->t_c = psched_get_time();
        parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
        if (!cl->level) {
                WARN_ON(!cl->un.leaf.q);
                qdisc_destroy(cl->un.leaf.q);
        }
        gen_kill_estimator(&cl->bstats, &cl->rate_est);
        qdisc_put_rtab(cl->rate);
        qdisc_put_rtab(cl->ceil);

        tcf_destroy_chain(&cl->filter_list);
        kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct hlist_node *n, *next;
        struct htb_class *cl;
        unsigned int i;

        cancel_work_sync(&q->work);
        qdisc_watchdog_cancel(&q->watchdog);
        /* This line used to be after the htb_destroy_class call below
         * and surprisingly it worked in 2.4. But it must precede it
         * because filters need their target class alive to be able to
         * call unbind_filter on it (without an Oops).
         */
        tcf_destroy_chain(&q->filter_list);

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
                        tcf_destroy_chain(&cl->filter_list);
        }
        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
                                          common.hnode)
                        htb_destroy_class(sch, cl);
        }
        qdisc_class_hash_destroy(&q->clhash);
        __skb_queue_purge(&q->direct_queue);
}

static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
        unsigned int qlen;
        struct Qdisc *new_q = NULL;
        int last_child = 0;

        /* TODO: why not allow deleting a subtree? References? Does the
         * tc subsystem guarantee us that in htb_destroy it holds no class
         * refs so that we can remove children safely there?
         */
        if (cl->children || cl->filter_cnt)
                return -EBUSY;

        if (!cl->level && htb_parent_last_child(cl)) {
                new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
                                          cl->parent->common.classid);
                last_child = 1;
        }

        sch_tree_lock(sch);

        if (!cl->level) {
                qlen = cl->un.leaf.q->q.qlen;
                qdisc_reset(cl->un.leaf.q);
                qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
        }

        /* delete from hash and active; remainder in destroy_class */
        qdisc_class_hash_remove(&q->clhash, &cl->common);
        if (cl->parent)
                cl->parent->children--;

        if (cl->prio_activity)
                htb_deactivate(q, cl);

        if (cl->cmode != HTB_CAN_SEND)
                htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);

        if (last_child)
                htb_parent_to_leaf(q, cl, new_q);

        BUG_ON(--cl->refcnt == 0);
        /*
         * This shouldn't happen: we "hold" one cops->get() when called
         * from tc_ctl_tclass; the destroy method is done from cops->put().
         */

        sch_tree_unlock(sch);
        return 0;
}

static void htb_put(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (--cl->refcnt == 0)
                htb_destroy_class(sch, cl);
}

static int htb_change_class(struct Qdisc *sch, u32 classid,
                            u32 parentid, struct nlattr **tca,
                            unsigned long *arg)
{
        int err = -EINVAL;
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)*arg, *parent;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct nlattr *tb[__TCA_HTB_MAX];
        struct tc_htb_opt *hopt;

        /* extract all subattrs from opt attr */
        if (!opt)
                goto failure;

        err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
        if (err < 0)
                goto failure;

        err = -EINVAL;
        if (tb[TCA_HTB_PARMS] == NULL)
                goto failure;

        parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

        hopt = nla_data(tb[TCA_HTB_PARMS]);

        rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
        ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
        if (!rtab || !ctab)
                goto failure;

        if (!cl) {              /* new class */
                struct Qdisc *new_q;
                int prio;
                struct {
                        struct nlattr           nla;
                        struct gnet_estimator   opt;
                } est = {
                        .nla = {
                                .nla_len        = nla_attr_size(sizeof(est.opt)),
                                .nla_type       = TCA_RATE,
                        },
                        .opt = {
                                /* 4s interval, 16s averaging constant */
                                .interval       = 2,
                                .ewma_log       = 2,
                        },
                };

                /* check for valid classid */
                if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
                    htb_find(classid, sch))
                        goto failure;

                /* check maximal depth */
                if (parent && parent->parent && parent->parent->level < 2) {
                        pr_err("htb: tree is too deep\n");
                        goto failure;
                }
                err = -ENOBUFS;
                cl = kzalloc(sizeof(*cl), GFP_KERNEL);
                if (!cl)
                        goto failure;

                err = gen_new_estimator(&cl->bstats, &cl->rate_est,
                                        qdisc_root_sleeping_lock(sch),
                                        tca[TCA_RATE] ? : &est.nla);
                if (err) {
                        kfree(cl);
                        goto failure;
                }

                cl->refcnt = 1;
                cl->children = 0;
                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                RB_CLEAR_NODE(&cl->pq_node);

                for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
                        RB_CLEAR_NODE(&cl->node[prio]);

                /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
                 * which can't be used inside of sch_tree_lock
                 * -- thanks to Karlis Peisenieks
                 */
                new_q = qdisc_create_dflt(sch->dev_queue,
                                          &pfifo_qdisc_ops, classid);
                sch_tree_lock(sch);
                if (parent && !parent->level) {
                        unsigned int qlen = parent->un.leaf.q->q.qlen;

                        /* turn parent into inner node */
                        qdisc_reset(parent->un.leaf.q);
                        qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
                        qdisc_destroy(parent->un.leaf.q);
                        if (parent->prio_activity)
                                htb_deactivate(q, parent);

                        /* remove from evt list because of level change */
                        if (parent->cmode != HTB_CAN_SEND) {
                                htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
                                parent->cmode = HTB_CAN_SEND;
                        }
                        parent->level = (parent->parent ? parent->parent->level
                                         : TC_HTB_MAXDEPTH) - 1;
                        memset(&parent->un.inner, 0, sizeof(parent->un.inner));
                }
                /* leaf (we) needs elementary qdisc */
                cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

                cl->common.classid = classid;
                cl->parent = parent;

                /* set class to be in HTB_CAN_SEND state */
                cl->tokens = hopt->buffer;
                cl->ctokens = hopt->cbuffer;
                cl->mbuffer = 60 * PSCHED_TICKS_PER_SEC;        /* 1min */
                cl->t_c = psched_get_time();
                cl->cmode = HTB_CAN_SEND;

                /* attach to the hash list and parent's family */
                qdisc_class_hash_insert(&q->clhash, &cl->common);
                if (parent)
                        parent->children++;
        } else {
                if (tca[TCA_RATE]) {
                        err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
                                                    qdisc_root_sleeping_lock(sch),
                                                    tca[TCA_RATE]);
                        if (err)
                                return err;
                }
                sch_tree_lock(sch);
        }

        /* there used to be a nasty bug here: we have to check that the node
         * is really a leaf before changing cl->un.leaf!
         */
        if (!cl->level) {
                cl->quantum = rtab->rate.rate / q->rate2quantum;
                if (!hopt->quantum && cl->quantum < 1000) {
                        pr_warning(
                               "HTB: quantum of class %X is small. Consider r2q change.\n",
                               cl->common.classid);
                        cl->quantum = 1000;
                }
                if (!hopt->quantum && cl->quantum > 200000) {
                        pr_warning(
                               "HTB: quantum of class %X is big. Consider r2q change.\n",
                               cl->common.classid);
                        cl->quantum = 200000;
                }
                if (hopt->quantum)
                        cl->quantum = hopt->quantum;
                if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
                        cl->prio = TC_HTB_NUMPRIO - 1;
        }
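
        /* A worked example of the quantum heuristic above, assuming the
         * common tc default of r2q (rate2quantum) == 10: a 100mbit class
         * has rtab->rate.rate == 12500000 bytes/s, so the computed quantum
         * 1250000 is clamped to 200000 with a warning; an 8kbit class
         * (1000 bytes/s) would get quantum 100, raised to the 1000-byte
         * floor. An explicit hopt->quantum from userspace overrides the
         * heuristic entirely.
         */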

        cl->buffer = hopt->buffer;
        cl->cbuffer = hopt->cbuffer;
        if (cl->rate)
                qdisc_put_rtab(cl->rate);
        cl->rate = rtab;
        if (cl->ceil)
                qdisc_put_rtab(cl->ceil);
        cl->ceil = ctab;
        sch_tree_unlock(sch);

        qdisc_class_hash_grow(sch, &q->clhash);

        *arg = (unsigned long)cl;
        return 0;

failure:
        if (rtab)
                qdisc_put_rtab(rtab);
        if (ctab)
                qdisc_put_rtab(ctab);
        return err;
}

static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = (struct htb_class *)arg;
        struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;

        return fl;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
                                     u32 classid)
{
        struct htb_class *cl = htb_find(classid, sch);

        /*if (cl && !cl->level) return 0;
         * The line above used to be there to prevent attaching filters to
         * leaves. But at least the tc_index filter uses this just to get
         * the class for other reasons, so we have to allow for it.
         * ----
         * 19.6.2002 As Werner explained it is ok - bind filter is just
         * another way to "lock" the class - unlike "get" this lock can
         * be broken by class during destroy IIUC.
         */
        if (cl)
                cl->filter_cnt++;
        return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
        struct htb_class *cl = (struct htb_class *)arg;

        if (cl)
                cl->filter_cnt--;
}

static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct hlist_node *n;
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
                        if (arg->count < arg->skip) {
                                arg->count++;
                                continue;
                        }
                        if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                        arg->count++;
                }
        }
}

static const struct Qdisc_class_ops htb_class_ops = {
        .graft          =       htb_graft,
        .leaf           =       htb_leaf,
        .qlen_notify    =       htb_qlen_notify,
        .get            =       htb_get,
        .put            =       htb_put,
        .change         =       htb_change_class,
        .delete         =       htb_delete,
        .walk           =       htb_walk,
        .tcf_chain      =       htb_find_tcf,
        .bind_tcf       =       htb_bind_filter,
        .unbind_tcf     =       htb_unbind_filter,
        .dump           =       htb_dump_class,
        .dump_stats     =       htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
        .cl_ops         =       &htb_class_ops,
        .id             =       "htb",
        .priv_size      =       sizeof(struct htb_sched),
        .enqueue        =       htb_enqueue,
        .dequeue        =       htb_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .drop           =       htb_drop,
        .init           =       htb_init,
        .reset          =       htb_reset,
        .destroy        =       htb_destroy,
        .dump           =       htb_dump,
        .owner          =       THIS_MODULE,
};

static int __init htb_module_init(void)
{
        return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
        unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");