linux/net/core/fib_rules.c
/*
 * net/core/fib_rules.c         Generic Routing Rules
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation, version 2.
 *
 * Authors:     Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
{
        struct fib_rule *r;

        r = kzalloc(ops->rule_size, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        atomic_set(&r->refcnt, 1);
        r->action = FR_ACT_TO_TBL;
        r->pref = pref;
        r->table = table;
        r->flags = flags;
        r->fr_net = hold_net(ops->fro_net);

        /* The lock is not required here, the list is unreachable
         * at the moment this function is called */
        list_add_tail(&r->list, &ops->rules_list);
        return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
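
/*
 * Usage sketch (illustrative, not part of this file): an address family
 * typically seeds its initial policy at ops setup time by calling
 * fib_default_rule_add() once per built-in table.  For IPv4 the defaults
 * are commonly a local, a main and a default table rule, roughly:
 *
 *	fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *	fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 *
 * The exact preferences and tables are up to the caller; the values above
 * are only an assumption for illustration.
 */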

u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
        struct list_head *pos;
        struct fib_rule *rule;

        if (!list_empty(&ops->rules_list)) {
                pos = ops->rules_list.next;
                if (pos->next != &ops->rules_list) {
                        rule = list_entry(pos->next, struct fib_rule, list);
                        if (rule->pref)
                                return rule->pref - 1;
                }
        }

        return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);
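
/*
 * Note on fib_default_rule_pref(): it looks at the second rule in the
 * list and, if that rule has a non-zero preference, hands out that
 * preference minus one.  With the usual IPv4 defaults (pref 0, 32766,
 * 32767) a rule added without FRA_PRIORITY therefore lands at 32765,
 * just in front of the main-table rule; the next one lands at 32764,
 * and so on.  The concrete numbers are only an illustration.
 */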

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
        struct fib_rules_ops *ops;

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (ops->family == family) {
                        if (!try_module_get(ops->owner))
                                ops = NULL;
                        rcu_read_unlock();
                        return ops;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
        if (ops)
                module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
        if (ops->flush_cache)
                ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
        int err = -EEXIST;
        struct fib_rules_ops *o;
        struct net *net;

        net = ops->fro_net;

        if (ops->rule_size < sizeof(struct fib_rule))
                return -EINVAL;

        if (ops->match == NULL || ops->configure == NULL ||
            ops->compare == NULL || ops->fill == NULL ||
            ops->action == NULL)
                return -EINVAL;

        spin_lock(&net->rules_mod_lock);
        list_for_each_entry(o, &net->rules_ops, list)
                if (ops->family == o->family)
                        goto errout;

        hold_net(net);
        list_add_tail_rcu(&ops->list, &net->rules_ops);
        err = 0;
errout:
        spin_unlock(&net->rules_mod_lock);

        return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
        struct fib_rules_ops *ops;
        int err;

        ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ops->rules_list);
        ops->fro_net = net;

        err = __fib_rules_register(ops);
        if (err) {
                kfree(ops);
                ops = ERR_PTR(err);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
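
/*
 * Registration sketch (hypothetical names, for illustration only): a
 * protocol provides a template with all mandatory hooks filled in and
 * duplicates it per namespace via fib_rules_register(), e.g.
 *
 *	static const struct fib_rules_ops myproto_rules_ops_template = {
 *		.family		= AF_MYPROTO,
 *		.rule_size	= sizeof(struct myproto_rule),
 *		.addr_size	= sizeof(u32),
 *		.match		= myproto_rule_match,
 *		.configure	= myproto_rule_configure,
 *		.compare	= myproto_rule_compare,
 *		.fill		= myproto_rule_fill,
 *		.action		= myproto_rule_action,
 *		.default_pref	= fib_default_rule_pref,
 *		.policy		= myproto_rule_policy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&myproto_rules_ops_template, net);
 *
 * __fib_rules_register() rejects the template unless match, configure,
 * compare, fill and action are all set and rule_size is at least
 * sizeof(struct fib_rule).
 */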

void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
        struct fib_rule *rule, *tmp;

        list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
                list_del_rcu(&rule->list);
                fib_rule_put(rule);
        }
}
EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);

static void fib_rules_put_rcu(struct rcu_head *head)
{
        struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
        struct net *net = ops->fro_net;

        release_net(net);
        kfree(ops);
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
        struct net *net = ops->fro_net;

        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
        fib_rules_cleanup_ops(ops);
        spin_unlock(&net->rules_mod_lock);

        call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags)
{
        int ret = 0;

        if (rule->iifindex && (rule->iifindex != fl->iif))
                goto out;

        if (rule->oifindex && (rule->oifindex != fl->oif))
                goto out;

        if ((rule->mark ^ fl->mark) & rule->mark_mask)
                goto out;

        ret = ops->match(rule, fl, flags);
out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
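
/*
 * fib_rule_match() checks the generic selectors (incoming/outgoing
 * interface and fwmark/mask) before handing off to the family specific
 * ->match() hook; a rule with FIB_RULE_INVERT set matches exactly when
 * this combined test fails.
 */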

int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
                     int flags, struct fib_lookup_arg *arg)
{
        struct fib_rule *rule;
        int err;

        rcu_read_lock();

        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
                if (!fib_rule_match(rule, ops, fl, flags))
                        continue;

                if (rule->action == FR_ACT_GOTO) {
                        struct fib_rule *target;

                        target = rcu_dereference(rule->ctarget);
                        if (target == NULL) {
                                continue;
                        } else {
                                rule = target;
                                goto jumped;
                        }
                } else if (rule->action == FR_ACT_NOP)
                        continue;
                else
                        err = ops->action(rule, fl, flags, arg);

                if (err != -EAGAIN) {
                        fib_rule_get(rule);
                        arg->rule = rule;
                        goto out;
                }
        }

        err = -ESRCH;
out:
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
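
/*
 * Lookup sketch (illustrative, an assumption about the caller rather
 * than something this file enforces): callers pass a fib_lookup_arg
 * filled in by the family's ->action() hook and must drop the reference
 * taken on the matched rule, roughly:
 *
 *	struct fib_lookup_arg arg = { .result = &res };
 *
 *	err = fib_rules_lookup(ops, flp, 0, &arg);
 *	if (err == 0) {
 *		... use the result ...
 *		fib_rule_put(arg.rule);
 *	}
 *
 * -ESRCH means no rule matched; any other error comes straight from the
 * ->action() hook (every return value except -EAGAIN ends the walk).
 */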

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
                            struct fib_rules_ops *ops)
{
        int err = -EINVAL;

        if (frh->src_len)
                if (tb[FRA_SRC] == NULL ||
                    frh->src_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_SRC]) != ops->addr_size)
                        goto errout;

        if (frh->dst_len)
                if (tb[FRA_DST] == NULL ||
                    frh->dst_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_DST]) != ops->addr_size)
                        goto errout;

        err = 0;
errout:
        return err;
}

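/*
 * RTM_NEWRULE handler: allocates a rule from the per family ops, fills
 * the generic selectors from the netlink attributes, lets the family
 * specific ->configure() hook parse the rest, resolves or records
 * FR_ACT_GOTO targets, and finally inserts the rule into rules_list
 * ordered by ascending preference.
 */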
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL, unresolved = 0;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        rule = kzalloc(ops->rule_size, GFP_KERNEL);
        if (rule == NULL) {
                err = -ENOMEM;
                goto errout;
        }
        rule->fr_net = hold_net(net);

        if (tb[FRA_PRIORITY])
                rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

        if (tb[FRA_IIFNAME]) {
                struct net_device *dev;

                rule->iifindex = -1;
                nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->iifname);
                if (dev)
                        rule->iifindex = dev->ifindex;
        }

        if (tb[FRA_OIFNAME]) {
                struct net_device *dev;

                rule->oifindex = -1;
                nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->oifname);
                if (dev)
                        rule->oifindex = dev->ifindex;
        }

        if (tb[FRA_FWMARK]) {
                rule->mark = nla_get_u32(tb[FRA_FWMARK]);
                if (rule->mark)
                        /* compatibility: if the mark value is non-zero all bits
                         * are compared unless a mask is explicitly specified.
                         */
                        rule->mark_mask = 0xFFFFFFFF;
        }

        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);

        if (!tb[FRA_PRIORITY] && ops->default_pref)
                rule->pref = ops->default_pref(ops);

        err = -EINVAL;
        if (tb[FRA_GOTO]) {
                if (rule->action != FR_ACT_GOTO)
                        goto errout_free;

                rule->target = nla_get_u32(tb[FRA_GOTO]);
                /* Backward jumps are prohibited to avoid endless loops */
                if (rule->target <= rule->pref)
                        goto errout_free;

                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
                                rule->ctarget = r;
                                break;
                        }
                }

                if (rule->ctarget == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;

        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->pref > rule->pref)
                        break;
                last = r;
        }

        fib_rule_get(rule);

        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
                 * any of them are pointing to this new rule.
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref) {
                                BUG_ON(r->ctarget != NULL);
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
                        }
                }
        }

        if (rule->action == FR_ACT_GOTO)
                ops->nr_goto_rules++;

        if (unresolved)
                ops->unresolved_rules++;

        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, &ops->rules_list);

        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;

errout_free:
        release_net(rule->fr_net);
        kfree(rule);
errout:
        rules_ops_put(ops);
        return err;
}

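/*
 * RTM_DELRULE handler: walks rules_list for the first rule matching
 * every attribute present in the request, unlinks it, invalidates any
 * goto rules whose resolved target it was, and drops the list's
 * reference after a synchronize_rcu().
 */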
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *tmp;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;

        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        list_for_each_entry(rule, &ops->rules_list, list) {
                if (frh->action && (frh->action != rule->action))
                        continue;

                if (frh->table && (frh_get_table(frh, tb) != rule->table))
                        continue;

                if (tb[FRA_PRIORITY] &&
                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
                        continue;

                if (tb[FRA_IIFNAME] &&
                    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
                        continue;

                if (tb[FRA_OIFNAME] &&
                    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
                        continue;

                if (tb[FRA_FWMARK] &&
                    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
                        continue;

                if (tb[FRA_FWMASK] &&
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;

                if (!ops->compare(rule, frh, tb))
                        continue;

                if (rule->flags & FIB_RULE_PERMANENT) {
                        err = -EPERM;
                        goto errout;
                }

                list_del_rcu(&rule->list);

                if (rule->action == FR_ACT_GOTO)
                        ops->nr_goto_rules--;

                /*
                 * Check if this deleted rule is the target of any goto
                 * rule; if so, mark those goto rules unresolved.  As this
                 * walk can be expensive, it is only performed if goto
                 * rules have actually been added.
                 */
                if (ops->nr_goto_rules > 0) {
                        list_for_each_entry(tmp, &ops->rules_list, list) {
                                if (tmp->ctarget == rule) {
                                        rcu_assign_pointer(tmp->ctarget, NULL);
                                        ops->unresolved_rules++;
                                }
                        }
                }

                synchronize_rcu();
                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).pid);
                fib_rule_put(rule);
                flush_route_cache(ops);
                rules_ops_put(ops);
                return 0;
        }

        err = -ENOENT;
errout:
        rules_ops_put(ops);
        return err;
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                                         struct fib_rule *rule)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
                         + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
                         + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4); /* FRA_FWMASK */

        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);

        return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                            u32 pid, u32 seq, int type, int flags,
                            struct fib_rules_ops *ops)
{
        struct nlmsghdr *nlh;
        struct fib_rule_hdr *frh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        frh = nlmsg_data(nlh);
        frh->family = ops->family;
        frh->table = rule->table;
        NLA_PUT_U32(skb, FRA_TABLE, rule->table);
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
        frh->flags = rule->flags;

        if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;

        if (rule->iifname[0]) {
                NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

                if (rule->iifindex == -1)
                        frh->flags |= FIB_RULE_IIF_DETACHED;
        }

        if (rule->oifname[0]) {
                NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

                if (rule->oifindex == -1)
                        frh->flags |= FIB_RULE_OIF_DETACHED;
        }

        if (rule->pref)
                NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

        if (rule->mark)
                NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

        if (rule->mark_mask || rule->mark)
                NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

        if (rule->target)
                NLA_PUT_U32(skb, FRA_GOTO, rule->target);

        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                      struct fib_rules_ops *ops)
{
        int idx = 0;
        struct fib_rule *rule;

        list_for_each_entry(rule, &ops->rules_list, list) {
                if (idx < cb->args[1])
                        goto skip;

                if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
                                     cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                     NLM_F_MULTI, ops) < 0)
                        break;
skip:
                idx++;
        }
        cb->args[1] = idx;
        rules_ops_put(ops);

        return skb->len;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rules_ops *ops;
        int idx = 0, family;

        family = rtnl_msg_family(cb->nlh);
        if (family != AF_UNSPEC) {
                /* Protocol specific dump request */
                ops = lookup_rules_ops(net, family);
                if (ops == NULL)
                        return -EAFNOSUPPORT;

                return dump_rules(skb, cb, ops);
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (idx < cb->args[0] || !try_module_get(ops->owner))
                        goto skip;

                if (dump_rules(skb, cb, ops) < 0)
                        break;

                cb->args[1] = 0;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[0] = idx;

        return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid)
{
        struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == -1 &&
                    strcmp(dev->name, rule->iifname) == 0)
                        rule->iifindex = dev->ifindex;
                if (rule->oifindex == -1 &&
                    strcmp(dev->name, rule->oifname) == 0)
                        rule->oifindex = dev->ifindex;
        }
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == dev->ifindex)
                        rule->iifindex = -1;
                if (rule->oifindex == dev->ifindex)
                        rule->oifindex = -1;
        }
}

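/*
 * Rules reference interfaces by name, so the netdevice notifier keeps
 * the cached ifindex in sync: a freshly registered device re-attaches
 * any rule whose iif/oif name matches, while an unregistered device
 * leaves the rule in place but marks the index detached (-1).
 */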
static int fib_rules_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
{
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        switch (event) {
        case NETDEV_REGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        attach_rules(&ops->rules_list, dev);
                break;

        case NETDEV_UNREGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        detach_rules(&ops->rules_list, dev);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
        .notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
        INIT_LIST_HEAD(&net->rules_ops);
        spin_lock_init(&net->rules_mod_lock);
        return 0;
}

static struct pernet_operations fib_rules_net_ops = {
        .init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
        int err;

        rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

        err = register_pernet_subsys(&fib_rules_net_ops);
        if (err < 0)
                goto fail;

        err = register_netdevice_notifier(&fib_rules_notifier);
        if (err < 0)
                goto fail_unregister;

        return 0;

fail_unregister:
        unregister_pernet_subsys(&fib_rules_net_ops);
fail:
        rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
        rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
        rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
        return err;
}

subsys_initcall(fib_rules_init);