linux/net/sched/sch_drr.c
<<
>>
Prefs
   1/*
   2 * net/sched/sch_drr.c         Deficit Round Robin scheduler
   3 *
   4 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * version 2 as published by the Free Software Foundation.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/slab.h>
  13#include <linux/init.h>
  14#include <linux/errno.h>
  15#include <linux/netdevice.h>
  16#include <linux/pkt_sched.h>
  17#include <net/sch_generic.h>
  18#include <net/pkt_sched.h>
  19#include <net/pkt_cls.h>
  20
/*
 * Per-class state.  Every class lives in the scheduler's class hash
 * (via common.hnode) and, while it has packets queued, also on the
 * scheduler's round-robin 'active' list via 'alist'.
 */
struct drr_class {
	struct Qdisc_class_common	common;		/* classid + hash linkage */
	unsigned int			refcnt;		/* users; freed when it drops to 0 */
	unsigned int			filter_cnt;	/* tc filters bound here; blocks delete */

	struct gnet_stats_basic_packed		bstats;	/* bytes/packets dequeued */
	struct gnet_stats_queue		qstats;		/* drop counters etc. */
	struct gnet_stats_rate_est	rate_est;	/* optional rate estimator state */
	struct list_head		alist;		/* link on drr_sched.active while backlogged */
	struct Qdisc			*qdisc;		/* child qdisc (pfifo by default) */

	u32				quantum;	/* bytes credited per round */
	u32				deficit;	/* bytes still sendable this round */
};
  35
/* Qdisc private data: filter chain, class hash and the DRR active list. */
struct drr_sched {
	struct list_head		active;		/* backlogged classes, in service order */
	struct tcf_proto		*filter_list;	/* tc classification filters */
	struct Qdisc_class_hash		clhash;		/* all classes, keyed by classid */
};
  41
  42static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
  43{
  44        struct drr_sched *q = qdisc_priv(sch);
  45        struct Qdisc_class_common *clc;
  46
  47        clc = qdisc_class_find(&q->clhash, classid);
  48        if (clc == NULL)
  49                return NULL;
  50        return container_of(clc, struct drr_class, common);
  51}
  52
/*
 * Empty a class's child qdisc and propagate the number of removed
 * packets up the qdisc hierarchy so ancestor qlen counters stay right.
 */
static void drr_purge_queue(struct drr_class *cl)
{
	/* qdisc_reset() zeroes q.qlen, so sample the length first. */
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
  60
/* Netlink attribute policy: the quantum is a 32-bit byte count. */
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
  64
/*
 * Create a new class or modify an existing one (tc class add/change).
 * *arg carries the existing class (non-NULL => change); on successful
 * creation it is set to the new class.  Returns 0 or a negative errno.
 */
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		/* Default quantum: one MTU-sized packet per round. */
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		/* Change of an existing class. */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		/* Quantum update must be atomic wrt. enqueue/dequeue. */
		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);

		return 0;
	}

	/* Creation of a new class. */
	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt         = 1;
	cl->common.classid = classid;
	cl->quantum        = quantum;
	cl->qdisc          = qdisc_create_dflt(sch->dev_queue,
					       &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		/* Fall back to noop_qdisc: packets will be dropped. */
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			/* Estimator setup failed: undo qdisc + class alloc. */
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	/* May reallocate the hash table; must run outside the tree lock. */
	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
 138
/*
 * Final teardown of a class: remove its rate estimator, destroy the
 * child qdisc and free the class.  Called once the refcount hits zero.
 */
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}
 145
/*
 * Delete a class (tc class del).  Refused with -EBUSY while any tc
 * filter still maps to the class.  The actual free happens later via
 * cops->put() when the caller drops its reference.
 */
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	sch_tree_unlock(sch);
	return 0;
}
 168
 169static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
 170{
 171        struct drr_class *cl = drr_find_class(sch, classid);
 172
 173        if (cl != NULL)
 174                cl->refcnt++;
 175
 176        return (unsigned long)cl;
 177}
 178
 179static void drr_put_class(struct Qdisc *sch, unsigned long arg)
 180{
 181        struct drr_class *cl = (struct drr_class *)arg;
 182
 183        if (--cl->refcnt == 0)
 184                drr_destroy_class(sch, cl);
 185}
 186
 187static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
 188{
 189        struct drr_sched *q = qdisc_priv(sch);
 190
 191        if (cl)
 192                return NULL;
 193
 194        return &q->filter_list;
 195}
 196
 197static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
 198                                  u32 classid)
 199{
 200        struct drr_class *cl = drr_find_class(sch, classid);
 201
 202        if (cl != NULL)
 203                cl->filter_cnt++;
 204
 205        return (unsigned long)cl;
 206}
 207
 208static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 209{
 210        struct drr_class *cl = (struct drr_class *)arg;
 211
 212        cl->filter_cnt--;
 213}
 214
/*
 * Replace a class's child qdisc with 'new' (tc qdisc replace), handing
 * the previous child back via *old.  A NULL 'new' means "attach a fresh
 * default pfifo" (falling back to noop_qdisc if that allocation fails).
 */
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	/* Purge + swap must be atomic wrt. the datapath. */
	sch_tree_lock(sch);
	drr_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}
 234
 235static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
 236{
 237        struct drr_class *cl = (struct drr_class *)arg;
 238
 239        return cl->qdisc;
 240}
 241
 242static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
 243{
 244        struct drr_class *cl = (struct drr_class *)arg;
 245
 246        if (cl->qdisc->q.qlen == 0)
 247                list_del(&cl->alist);
 248}
 249
 250static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
 251                          struct sk_buff *skb, struct tcmsg *tcm)
 252{
 253        struct drr_class *cl = (struct drr_class *)arg;
 254        struct nlattr *nest;
 255
 256        tcm->tcm_parent = TC_H_ROOT;
 257        tcm->tcm_handle = cl->common.classid;
 258        tcm->tcm_info   = cl->qdisc->handle;
 259
 260        nest = nla_nest_start(skb, TCA_OPTIONS);
 261        if (nest == NULL)
 262                goto nla_put_failure;
 263        if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
 264                goto nla_put_failure;
 265        return nla_nest_end(skb, nest);
 266
 267nla_put_failure:
 268        nla_nest_cancel(skb, nest);
 269        return -EMSGSIZE;
 270}
 271
/*
 * Dump one class's statistics: basic byte/packet counters, rate
 * estimate, queue counters, plus the DRR-specific xstats (deficit).
 * Returns 0 on success, -1 if the dump buffer overflowed.
 */
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (cl->qdisc->q.qlen) {
		/* The deficit is only meaningful while the class is active. */
		xstats.deficit = cl->deficit;
		cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
	}

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
 291
/*
 * Iterate over all classes, invoking arg->fn for each one past the
 * arg->skip prefix.  Follows the standard qdisc_walker protocol:
 * arg->count tracks visited classes, arg->stop aborts the walk when
 * the callback returns a negative value.
 */
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			/* Skipped entries still count toward arg->count. */
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
 316
/*
 * Map an skb to a drr_class.  First honour a direct classid in
 * skb->priority (when its major number matches this qdisc), then fall
 * back to the tc filter chain.  Returns NULL when no class matched or
 * an action consumed the packet; *qerr tells the caller which.
 */
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen packets also return NULL */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
 350
/*
 * Enqueue: classify the skb and hand it to the chosen class's child
 * qdisc.  A class that just became backlogged is appended to the
 * active list with a fresh deficit of one quantum.
 */
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		/* Only count a drop if the packet wasn't stolen by an action. */
		if (err & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			sch->qstats.drops++;
		}
		return err;
	}

	/* qlen == 1 means the class was idle before this packet. */
	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->q.qlen++;
	return err;
}
 382
/*
 * Deficit Round Robin dequeue: serve the head class of the active list
 * while its deficit covers the head packet's length; otherwise credit
 * it one quantum and rotate it to the tail of the list.
 */
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		/* Peek first: the packet is only dequeued if it fits. */
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL)
			goto out;

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		/* Out of credit: top up and move to the end of the round. */
		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
 417
/*
 * Drop one packet under pressure: ask each active class's child qdisc
 * (those that implement ->drop) until one sheds a packet.  Returns the
 * dropped packet's length, or 0 if nothing could be dropped.
 */
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				/* list_del during iteration is safe: we return at once. */
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}
 437
 438static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 439{
 440        struct drr_sched *q = qdisc_priv(sch);
 441        int err;
 442
 443        err = qdisc_class_hash_init(&q->clhash);
 444        if (err < 0)
 445                return err;
 446        INIT_LIST_HEAD(&q->active);
 447        return 0;
 448}
 449
/*
 * Reset: unlink every backlogged class from the active list, reset all
 * child qdiscs and clear the root queue length.
 */
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
			/* A non-empty child means the class is on the active list. */
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}
 466
/*
 * Destroy: tear down the filter chain first (so nothing can classify
 * into a class being freed), then destroy every class and the hash.
 */
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *n, *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		/* _safe variant: drr_destroy_class() frees the entry. */
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
 483
/* Class-level operations exposed to the tc control plane. */
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
 499
/* Qdisc-level operations; registered under the "drr" identifier. */
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,	/* generic peek built on ->dequeue */
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
 513
/* Module load: register the DRR qdisc with the packet scheduler core. */
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}
 518
/* Module unload: unregister the DRR qdisc. */
static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}
 523
/* Standard module plumbing. */
module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
 527
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.