   1/*
   2 * net/sched/cls_flow.c         Generic flow classifier
   3 *
   4 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License
   8 * as published by the Free Software Foundation; either version 2
   9 * of the License, or (at your option) any later version.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/init.h>
  14#include <linux/list.h>
  15#include <linux/jhash.h>
  16#include <linux/random.h>
  17#include <linux/pkt_cls.h>
  18#include <linux/skbuff.h>
  19#include <linux/in.h>
  20#include <linux/ip.h>
  21#include <linux/ipv6.h>
  22#include <linux/if_vlan.h>
  23#include <linux/slab.h>
  24
  25#include <net/pkt_cls.h>
  26#include <net/ip.h>
  27#include <net/route.h>
  28#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
  29#include <net/netfilter/nf_conntrack.h>
  30#endif
  31
/* Per-tcf_proto root: the list of all flow filters of this classifier. */
struct flow_head {
	struct list_head	filters;
};
  35
/*
 * One configured flow filter.  The selected keys (keymask/nkeys) are either
 * jhashed (FLOW_MODE_HASH) or mapped directly (FLOW_MODE_MAP), then run
 * through mask/xor/rshift/addend, optionally folded modulo divisor, and
 * added to baseclass to produce the resulting classid.
 */
struct flow_filter {
	struct list_head	list;		/* linkage in flow_head.filters */
	struct tcf_exts		exts;		/* attached actions / policing */
	struct tcf_ematch_tree	ematches;	/* extended match tree */
	struct timer_list	perturb_timer;	/* periodic hashrnd reseed */
	u32			perturb_period;	/* reseed interval in jiffies, 0 = off */
	u32			handle;		/* filter identity */

	u32			nkeys;		/* number of bits set in keymask */
	u32			keymask;	/* bitmask of FLOW_KEY_* to extract */
	u32			mode;		/* FLOW_MODE_HASH or FLOW_MODE_MAP */
	u32			mask;		/* ANDed with key in map mode */
	u32			xor;		/* XORed into the masked key */
	u32			rshift;		/* right shift applied afterwards */
	u32			addend;		/* added after the shift */
	u32			divisor;	/* optional modulus for the classid */
	u32			baseclass;	/* base classid result is added to */
	u32			hashrnd;	/* jhash seed, reseeded by perturb timer */
};
  55
/* Maps the generic extension slots to this classifier's netlink attributes. */
static const struct tcf_ext_map flow_ext_map = {
	.action = TCA_FLOW_ACT,
	.police = TCA_FLOW_POLICE,
};
  60
  61static inline u32 addr_fold(void *addr)
  62{
  63        unsigned long a = (unsigned long)addr;
  64
  65        return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
  66}
  67
  68static u32 flow_get_src(struct sk_buff *skb)
  69{
  70        switch (skb->protocol) {
  71        case htons(ETH_P_IP):
  72                if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
  73                        return ntohl(ip_hdr(skb)->saddr);
  74                break;
  75        case htons(ETH_P_IPV6):
  76                if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
  77                        return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
  78                break;
  79        }
  80
  81        return addr_fold(skb->sk);
  82}
  83
  84static u32 flow_get_dst(struct sk_buff *skb)
  85{
  86        switch (skb->protocol) {
  87        case htons(ETH_P_IP):
  88                if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
  89                        return ntohl(ip_hdr(skb)->daddr);
  90                break;
  91        case htons(ETH_P_IPV6):
  92                if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
  93                        return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
  94                break;
  95        }
  96
  97        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
  98}
  99
 100static u32 flow_get_proto(struct sk_buff *skb)
 101{
 102        switch (skb->protocol) {
 103        case htons(ETH_P_IP):
 104                return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
 105                       ip_hdr(skb)->protocol : 0;
 106        case htons(ETH_P_IPV6):
 107                return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
 108                       ipv6_hdr(skb)->nexthdr : 0;
 109        default:
 110                return 0;
 111        }
 112}
 113
 114static int has_ports(u8 protocol)
 115{
 116        switch (protocol) {
 117        case IPPROTO_TCP:
 118        case IPPROTO_UDP:
 119        case IPPROTO_UDPLITE:
 120        case IPPROTO_SCTP:
 121        case IPPROTO_DCCP:
 122        case IPPROTO_ESP:
 123                return 1;
 124        default:
 125                return 0;
 126        }
 127}
 128
/*
 * FLOW_KEY_PROTO_SRC: the transport source port.
 * IPv4: only for first fragments of a port-carrying protocol, and only if
 * the two bytes past the IP header can be made linear.
 * IPv6: reads the two bytes directly after the fixed header, i.e. assumes
 * no extension headers precede the transport header.
 * Falls back to a fold of the socket pointer so local flows still spread.
 */
static u32 flow_get_proto_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
		/* Fragments other than the first carry no transport header. */
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    has_ports(iph->protocol) &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 2))
			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;

		if (!pskb_network_may_pull(skb, sizeof(*iph) + 2))
			break;
		iph = ipv6_hdr(skb);
		if (has_ports(iph->nexthdr))
			return ntohs(*(__be16 *)&iph[1]);
		break;
	}
	}

	return addr_fold(skb->sk);
}
 158
/*
 * FLOW_KEY_PROTO_DST: the transport destination port, i.e. the two bytes
 * at offset 2 past the network header.  Same restrictions as
 * flow_get_proto_src(); falls back to a fold of the dst entry mixed with
 * the protocol.
 */
static u32 flow_get_proto_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			break;
		iph = ip_hdr(skb);
		/* Fragments other than the first carry no transport header. */
		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
		    has_ports(iph->protocol) &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
			return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph;

		if (!pskb_network_may_pull(skb, sizeof(*iph) + 4))
			break;
		iph = ipv6_hdr(skb);
		if (has_ports(iph->nexthdr))
			return ntohs(*(__be16 *)((void *)&iph[1] + 2));
		break;
	}
	}

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
 188
/* FLOW_KEY_IIF: the incoming interface index. */
static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}
 193
/* FLOW_KEY_PRIORITY: the skb priority value. */
static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}
 198
/* FLOW_KEY_MARK: the netfilter/routing mark. */
static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}
 203
/* FLOW_KEY_NFCT: the conntrack entry pointer folded to 32 bits (0 without
 * conntrack support compiled in). */
static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}
 212
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/*
 * Evaluate to the given member of the skb's conntrack tuple for its
 * current direction.  If the skb has no conntrack entry this jumps to a
 * "fallback" label, which every caller must provide.
 */
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);			\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
/* Without conntrack support every lookup falls back immediately. */
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif
 229
/*
 * FLOW_KEY_NFCT_SRC: the conntrack tuple's source address (low 32 bits
 * for IPv6).  CTTUPLE() jumps to "fallback" when no conntrack entry
 * exists, in which case the plain header lookup is used.
 */
static u32 flow_get_nfct_src(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb);
}
 241
/*
 * FLOW_KEY_NFCT_DST: the conntrack tuple's destination address (low 32
 * bits for IPv6), falling back to the plain header lookup.
 */
static u32 flow_get_nfct_dst(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb);
}
 253
/* FLOW_KEY_NFCT_PROTO_SRC: the conntrack tuple's source port, falling
 * back to the header lookup when no conntrack entry exists. */
static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb);
}
 260
/* FLOW_KEY_NFCT_PROTO_DST: the conntrack tuple's destination port,
 * falling back to the header lookup when no conntrack entry exists. */
static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb);
}
 267
/* FLOW_KEY_RTCLASSID: the routing realm classid of the dst entry, 0 when
 * there is no dst or routing classid support is not compiled in. */
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ROUTE
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}
 276
/* FLOW_KEY_SKUID: the fsuid of the socket owner's credentials, or 0 for
 * packets without a local socket/file. */
static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsuid;
	return 0;
}
 283
/* FLOW_KEY_SKGID: the fsgid of the socket owner's credentials, or 0 for
 * packets without a local socket/file. */
static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsgid;
	return 0;
}
 290
/* FLOW_KEY_VLAN_TAG: the VLAN ID bits of the tag, or 0 when the skb
 * carries no VLAN tag. */
static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}
 299
/*
 * Dispatch a FLOW_KEY_* identifier to its extraction helper.  flow_change()
 * rejects unknown key bits, so the default branch can only be reached on
 * internal inconsistency.
 */
static u32 flow_key_get(struct sk_buff *skb, int key)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb);
	case FLOW_KEY_DST:
		return flow_get_dst(skb);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
 342
 343static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
 344                         struct tcf_result *res)
 345{
 346        struct flow_head *head = tp->root;
 347        struct flow_filter *f;
 348        u32 keymask;
 349        u32 classid;
 350        unsigned int n, key;
 351        int r;
 352
 353        list_for_each_entry(f, &head->filters, list) {
 354                u32 keys[f->nkeys];
 355
 356                if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 357                        continue;
 358
 359                keymask = f->keymask;
 360
 361                for (n = 0; n < f->nkeys; n++) {
 362                        key = ffs(keymask) - 1;
 363                        keymask &= ~(1 << key);
 364                        keys[n] = flow_key_get(skb, key);
 365                }
 366
 367                if (f->mode == FLOW_MODE_HASH)
 368                        classid = jhash2(keys, f->nkeys, f->hashrnd);
 369                else {
 370                        classid = keys[0];
 371                        classid = (classid & f->mask) ^ f->xor;
 372                        classid = (classid >> f->rshift) + f->addend;
 373                }
 374
 375                if (f->divisor)
 376                        classid %= f->divisor;
 377
 378                res->class   = 0;
 379                res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);
 380
 381                r = tcf_exts_exec(skb, &f->exts, res);
 382                if (r < 0)
 383                        continue;
 384                return r;
 385        }
 386        return -1;
 387}
 388
 389static void flow_perturbation(unsigned long arg)
 390{
 391        struct flow_filter *f = (struct flow_filter *)arg;
 392
 393        get_random_bytes(&f->hashrnd, 4);
 394        if (f->perturb_period)
 395                mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
 396}
 397
/* Netlink attribute policy: all scalar options are u32, the rest nested. */
static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
 412
/*
 * Create a new flow filter or update an existing one (*arg != 0).
 * All attributes are validated first; the actual changes are committed
 * under the tcf tree lock so flow_classify() never observes a
 * half-updated filter.
 */
static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		/* A zero minor could never name a valid class. */
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		/* Reject key bits this kernel does not know about. */
		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		/* Update path: a given handle must match the old filter. */
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		/* Map mode consumes exactly one key. */
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		/* Keep the old perturbation only while staying in hash mode. */
		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		/* Creation path: an explicit handle and key set are required. */
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		/* Perturbation only reseeds the hash; invalid in map mode. */
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		/* Default the major to the qdisc handle, the minor to 1. */
		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	/* Commit the new parameters atomically w.r.t. the fast path. */
	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	/* Only a newly created filter needs to be linked into the list. */
	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}
 564
/*
 * Free a filter that is no longer reachable from the filter list.
 * The timer must be stopped synchronously first, since its callback
 * dereferences the filter.
 */
static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}
 572
 573static int flow_delete(struct tcf_proto *tp, unsigned long arg)
 574{
 575        struct flow_filter *f = (struct flow_filter *)arg;
 576
 577        tcf_tree_lock(tp);
 578        list_del(&f->list);
 579        tcf_tree_unlock(tp);
 580        flow_destroy_filter(tp, f);
 581        return 0;
 582}
 583
 584static int flow_init(struct tcf_proto *tp)
 585{
 586        struct flow_head *head;
 587
 588        head = kzalloc(sizeof(*head), GFP_KERNEL);
 589        if (head == NULL)
 590                return -ENOBUFS;
 591        INIT_LIST_HEAD(&head->filters);
 592        tp->root = head;
 593        return 0;
 594}
 595
 596static void flow_destroy(struct tcf_proto *tp)
 597{
 598        struct flow_head *head = tp->root;
 599        struct flow_filter *f, *next;
 600
 601        list_for_each_entry_safe(f, next, &head->filters, list) {
 602                list_del(&f->list);
 603                flow_destroy_filter(tp, f);
 604        }
 605        kfree(head);
 606}
 607
 608static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
 609{
 610        struct flow_head *head = tp->root;
 611        struct flow_filter *f;
 612
 613        list_for_each_entry(f, &head->filters, list)
 614                if (f->handle == handle)
 615                        return (unsigned long)f;
 616        return 0;
 617}
 618
/* Flow filters are not reference counted; nothing to release here. */
static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}
 622
/*
 * Dump one filter's configuration into a netlink message (a NULL handle
 * dumps nothing).  Returns skb->len on success, -1 when the message
 * overruns, in which case the partial nest is trimmed away.
 */
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

	/* Only emit transform attributes that differ from their defaults. */
	if (f->mask != ~0 || f->xor != 0) {
		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
	}
	if (f->rshift)
		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
	if (f->addend)
		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

	if (f->divisor)
		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
	if (f->baseclass)
		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

	/* Stored internally in jiffies, exposed to userspace in seconds. */
	if (f->perturb_period)
		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}
 676
 677static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 678{
 679        struct flow_head *head = tp->root;
 680        struct flow_filter *f;
 681
 682        list_for_each_entry(f, &head->filters, list) {
 683                if (arg->count < arg->skip)
 684                        goto skip;
 685                if (arg->fn(tp, (unsigned long)f, arg) < 0) {
 686                        arg->stop = 1;
 687                        break;
 688                }
 689skip:
 690                arg->count++;
 691        }
 692}
 693
/* Classifier operations registered with the tc core under kind "flow". */
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};
 707
/* Module entry: register the "flow" classifier with the tc core. */
static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}
 712
/* Module exit: unregister the classifier. */
static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}
 717
module_init(cls_flow_init);
module_exit(cls_flow_exit);

/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");
 724