linux/net/ipv6/ip6_flowlabel.c
   1/*
   2 *      ip6_flowlabel.c         IPv6 flowlabel manager.
   3 *
   4 *      This program is free software; you can redistribute it and/or
   5 *      modify it under the terms of the GNU General Public License
   6 *      as published by the Free Software Foundation; either version
   7 *      2 of the License, or (at your option) any later version.
   8 *
   9 *      Authors:        Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10 */
  11
  12#include <linux/capability.h>
  13#include <linux/errno.h>
  14#include <linux/types.h>
  15#include <linux/socket.h>
  16#include <linux/net.h>
  17#include <linux/netdevice.h>
  18#include <linux/if_arp.h>
  19#include <linux/in6.h>
  20#include <linux/route.h>
  21#include <linux/proc_fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/slab.h>
  24#include <linux/export.h>
  25
  26#include <net/net_namespace.h>
  27#include <net/sock.h>
  28
  29#include <net/ipv6.h>
  30#include <net/ndisc.h>
  31#include <net/protocol.h>
  32#include <net/ip6_route.h>
  33#include <net/addrconf.h>
  34#include <net/rawv6.h>
  35#include <net/icmp.h>
  36#include <net/transp_v6.h>
  37
  38#include <asm/uaccess.h>
  39
   40#define FL_MIN_LINGER   6       /* Minimal linger. It is set to 6 seconds, as
   41                                   specified in the old IPv6 RFC. Well, it was a
   42                                   reasonable value. */
  43#define FL_MAX_LINGER   60      /* Maximal linger timeout */
  44
  45/* FL hash table */
  46
  47#define FL_MAX_PER_SOCK 32
  48#define FL_MAX_SIZE     4096
  49#define FL_HASH_MASK    255
  50#define FL_HASH(l)      (ntohl(l)&FL_HASH_MASK)
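/*
 * A brief note on the table below: labels are distributed over FL_HASH_MASK+1
 * (256) buckets using the low eight bits of the label in host byte order;
 * fl_ht[] is that bucket array and fl_size counts all installed labels.
 */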
  51
  52static atomic_t fl_size = ATOMIC_INIT(0);
  53static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1];
  54
  55static void ip6_fl_gc(unsigned long dummy);
  56static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
  57
   58/* FL hash table lock: it protects only against the GC */
  59
  60static DEFINE_RWLOCK(ip6_fl_lock);
  61
   62/* Lock for the per-socket flowlabel lists */
  63
  64static DEFINE_RWLOCK(ip6_sk_fl_lock);
  65
  66
  67static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
  68{
  69        struct ip6_flowlabel *fl;
  70
  71        for (fl=fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
  72                if (fl->label == label && net_eq(fl->fl_net, net))
  73                        return fl;
  74        }
  75        return NULL;
  76}
  77
  78static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
  79{
  80        struct ip6_flowlabel *fl;
  81
  82        read_lock_bh(&ip6_fl_lock);
  83        fl = __fl_lookup(net, label);
  84        if (fl)
  85                atomic_inc(&fl->users);
  86        read_unlock_bh(&ip6_fl_lock);
  87        return fl;
  88}
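/*
 * Both lookup helpers return an entry from the global hash; fl_lookup()
 * additionally takes a reference on fl->users, so its callers are expected
 * to drop that reference with fl_release() when they are done with the label.
 */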
  89
  90
  91static void fl_free(struct ip6_flowlabel *fl)
  92{
  93        if (fl) {
  94                release_net(fl->fl_net);
  95                kfree(fl->opt);
  96        }
  97        kfree(fl);
  98}
  99
 100static void fl_release(struct ip6_flowlabel *fl)
 101{
 102        write_lock_bh(&ip6_fl_lock);
 103
 104        fl->lastuse = jiffies;
 105        if (atomic_dec_and_test(&fl->users)) {
 106                unsigned long ttd = fl->lastuse + fl->linger;
 107                if (time_after(ttd, fl->expires))
 108                        fl->expires = ttd;
 109                ttd = fl->expires;
 110                if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
 111                        struct ipv6_txoptions *opt = fl->opt;
 112                        fl->opt = NULL;
 113                        kfree(opt);
 114                }
 115                if (!timer_pending(&ip6_fl_gc_timer) ||
 116                    time_after(ip6_fl_gc_timer.expires, ttd))
 117                        mod_timer(&ip6_fl_gc_timer, ttd);
 118        }
 119        write_unlock_bh(&ip6_fl_lock);
 120}
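/*
 * Dropping the last user reference does not free the label immediately:
 * fl->expires is pushed out to at least lastuse + linger, exclusively
 * shared options are freed right away, and the GC timer is (re)armed so
 * that ip6_fl_gc() reaps the entry once it really expires.
 */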
 121
 122static void ip6_fl_gc(unsigned long dummy)
 123{
 124        int i;
 125        unsigned long now = jiffies;
 126        unsigned long sched = 0;
 127
 128        write_lock(&ip6_fl_lock);
 129
 130        for (i=0; i<=FL_HASH_MASK; i++) {
 131                struct ip6_flowlabel *fl, **flp;
 132                flp = &fl_ht[i];
 133                while ((fl=*flp) != NULL) {
 134                        if (atomic_read(&fl->users) == 0) {
 135                                unsigned long ttd = fl->lastuse + fl->linger;
 136                                if (time_after(ttd, fl->expires))
 137                                        fl->expires = ttd;
 138                                ttd = fl->expires;
 139                                if (time_after_eq(now, ttd)) {
 140                                        *flp = fl->next;
 141                                        fl_free(fl);
 142                                        atomic_dec(&fl_size);
 143                                        continue;
 144                                }
 145                                if (!sched || time_before(ttd, sched))
 146                                        sched = ttd;
 147                        }
 148                        flp = &fl->next;
 149                }
 150        }
 151        if (!sched && atomic_read(&fl_size))
 152                sched = now + FL_MAX_LINGER;
 153        if (sched) {
 154                mod_timer(&ip6_fl_gc_timer, sched);
 155        }
 156        write_unlock(&ip6_fl_lock);
 157}
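/*
 * The GC walks every bucket, frees expired entries that have no users left,
 * and reschedules itself for the earliest remaining deadline (or FL_MAX_LINGER
 * from now when labels still exist but none of the unused ones set one).
 */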
 158
 159static void __net_exit ip6_fl_purge(struct net *net)
 160{
 161        int i;
 162
 163        write_lock(&ip6_fl_lock);
 164        for (i = 0; i <= FL_HASH_MASK; i++) {
 165                struct ip6_flowlabel *fl, **flp;
 166                flp = &fl_ht[i];
 167                while ((fl = *flp) != NULL) {
 168                        if (net_eq(fl->fl_net, net) &&
 169                            atomic_read(&fl->users) == 0) {
 170                                *flp = fl->next;
 171                                fl_free(fl);
 172                                atomic_dec(&fl_size);
 173                                continue;
 174                        }
 175                        flp = &fl->next;
 176                }
 177        }
 178        write_unlock(&ip6_fl_lock);
 179}
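/*
 * ip6_fl_purge() runs on network namespace teardown and drops the exiting
 * namespace's labels that no longer have any users; referenced ones are
 * left to fl_release()/ip6_fl_gc().
 */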
 180
 181static struct ip6_flowlabel *fl_intern(struct net *net,
 182                                       struct ip6_flowlabel *fl, __be32 label)
 183{
 184        struct ip6_flowlabel *lfl;
 185
 186        fl->label = label & IPV6_FLOWLABEL_MASK;
 187
 188        write_lock_bh(&ip6_fl_lock);
 189        if (label == 0) {
 190                for (;;) {
 191                        fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
 192                        if (fl->label) {
 193                                lfl = __fl_lookup(net, fl->label);
 194                                if (lfl == NULL)
 195                                        break;
 196                        }
 197                }
 198        } else {
 199                /*
  200                 * we dropped the ip6_fl_lock, so this entry could reappear
  201                 * and we need to recheck with it.
  202                 *
  203                 * OTOH there is no need to search the active socket first, as
  204                 * is done in ipv6_flowlabel_opt - the sock is locked, so a new
  205                 * entry with the same label can only appear on another sock.
 206                 */
 207                lfl = __fl_lookup(net, fl->label);
 208                if (lfl != NULL) {
 209                        atomic_inc(&lfl->users);
 210                        write_unlock_bh(&ip6_fl_lock);
 211                        return lfl;
 212                }
 213        }
 214
 215        fl->lastuse = jiffies;
 216        fl->next = fl_ht[FL_HASH(fl->label)];
 217        fl_ht[FL_HASH(fl->label)] = fl;
 218        atomic_inc(&fl_size);
 219        write_unlock_bh(&ip6_fl_lock);
 220        return NULL;
 221}
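/*
 * fl_intern() returns NULL when the new label was inserted into the hash.
 * A zero label means "pick a random unused value".  If the requested label
 * already exists, the existing entry is returned with a user reference
 * already taken, and the caller re-runs its sharing checks against it.
 */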
 222
 223
 224
 225/* Socket flowlabel lists */
 226
 227struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
 228{
 229        struct ipv6_fl_socklist *sfl;
 230        struct ipv6_pinfo *np = inet6_sk(sk);
 231
 232        label &= IPV6_FLOWLABEL_MASK;
 233
 234        read_lock_bh(&ip6_sk_fl_lock);
 235        for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) {
 236                struct ip6_flowlabel *fl = sfl->fl;
 237                if (fl->label == label) {
 238                        fl->lastuse = jiffies;
 239                        atomic_inc(&fl->users);
 240                        read_unlock_bh(&ip6_sk_fl_lock);
 241                        return fl;
 242                }
 243        }
 244        read_unlock_bh(&ip6_sk_fl_lock);
 245        return NULL;
 246}
 247
 248EXPORT_SYMBOL_GPL(fl6_sock_lookup);
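/*
 * fl6_sock_lookup() resolves a label against the calling socket's private
 * list only (not the global hash), refreshes lastuse and, like fl_lookup(),
 * returns the entry with an extra user reference.
 */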
 249
 250void fl6_free_socklist(struct sock *sk)
 251{
 252        struct ipv6_pinfo *np = inet6_sk(sk);
 253        struct ipv6_fl_socklist *sfl;
 254
 255        while ((sfl = np->ipv6_fl_list) != NULL) {
 256                np->ipv6_fl_list = sfl->next;
 257                fl_release(sfl->fl);
 258                kfree(sfl);
 259        }
 260}
 261
 262/* Service routines */
 263
 264
 265/*
  266   This is the only difficult place. A flowlabel enforces equal headers
  267   up to and including the routing header; however, the user may supply
  268   options following the rthdr.
 269 */
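/*
   Roughly, as a sketch of the merge done below (the "result" name is only
   illustrative): the hop-by-hop, destination-0 and routing headers always
   come from the flow label's stored options, while the caller-supplied fopt
   may only contribute dst1opt, i.e. options placed after the routing header:

        result->hopopt  = fl->opt->hopopt;
        result->dst0opt = fl->opt->dst0opt;
        result->srcrt   = fl->opt->srcrt;
        result->dst1opt = fopt->dst1opt;
 */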
 270
 271struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
 272                                         struct ip6_flowlabel * fl,
 273                                         struct ipv6_txoptions * fopt)
 274{
 275        struct ipv6_txoptions * fl_opt = fl->opt;
 276
 277        if (fopt == NULL || fopt->opt_flen == 0)
 278                return fl_opt;
 279
 280        if (fl_opt != NULL) {
 281                opt_space->hopopt = fl_opt->hopopt;
 282                opt_space->dst0opt = fl_opt->dst0opt;
 283                opt_space->srcrt = fl_opt->srcrt;
 284                opt_space->opt_nflen = fl_opt->opt_nflen;
 285        } else {
 286                if (fopt->opt_nflen == 0)
 287                        return fopt;
 288                opt_space->hopopt = NULL;
 289                opt_space->dst0opt = NULL;
 290                opt_space->srcrt = NULL;
 291                opt_space->opt_nflen = 0;
 292        }
 293        opt_space->dst1opt = fopt->dst1opt;
 294        opt_space->opt_flen = fopt->opt_flen;
 295        return opt_space;
 296}
 297EXPORT_SYMBOL_GPL(fl6_merge_options);
 298
 299static unsigned long check_linger(unsigned long ttl)
 300{
 301        if (ttl < FL_MIN_LINGER)
 302                return FL_MIN_LINGER*HZ;
 303        if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
 304                return 0;
 305        return ttl*HZ;
 306}
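/*
 * check_linger() converts seconds to jiffies, silently raising anything
 * below FL_MIN_LINGER to 6 seconds; values above FL_MAX_LINGER (60s) are
 * refused (0 is returned) unless the caller has CAP_NET_ADMIN.
 */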
 307
 308static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
 309{
 310        linger = check_linger(linger);
 311        if (!linger)
 312                return -EPERM;
 313        expires = check_linger(expires);
 314        if (!expires)
 315                return -EPERM;
 316        fl->lastuse = jiffies;
 317        if (time_before(fl->linger, linger))
 318                fl->linger = linger;
 319        if (time_before(expires, fl->linger))
 320                expires = fl->linger;
 321        if (time_before(fl->expires, fl->lastuse + expires))
 322                fl->expires = fl->lastuse + expires;
 323        return 0;
 324}
 325
 326static struct ip6_flowlabel *
 327fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 328          char __user *optval, int optlen, int *err_p)
 329{
 330        struct ip6_flowlabel *fl = NULL;
 331        int olen;
 332        int addr_type;
 333        int err;
 334
 335        olen = optlen - CMSG_ALIGN(sizeof(*freq));
 336        err = -EINVAL;
 337        if (olen > 64 * 1024)
 338                goto done;
 339
 340        err = -ENOMEM;
 341        fl = kzalloc(sizeof(*fl), GFP_KERNEL);
 342        if (fl == NULL)
 343                goto done;
 344
 345        if (olen > 0) {
 346                struct msghdr msg;
 347                struct flowi6 flowi6;
 348                int junk;
 349
 350                err = -ENOMEM;
 351                fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
 352                if (fl->opt == NULL)
 353                        goto done;
 354
 355                memset(fl->opt, 0, sizeof(*fl->opt));
 356                fl->opt->tot_len = sizeof(*fl->opt) + olen;
 357                err = -EFAULT;
 358                if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
 359                        goto done;
 360
 361                msg.msg_controllen = olen;
 362                msg.msg_control = (void*)(fl->opt+1);
 363                memset(&flowi6, 0, sizeof(flowi6));
 364
 365                err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
 366                                        &junk, &junk);
 367                if (err)
 368                        goto done;
 369                err = -EINVAL;
 370                if (fl->opt->opt_flen)
 371                        goto done;
 372                if (fl->opt->opt_nflen == 0) {
 373                        kfree(fl->opt);
 374                        fl->opt = NULL;
 375                }
 376        }
 377
 378        fl->fl_net = hold_net(net);
 379        fl->expires = jiffies;
 380        err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
 381        if (err)
 382                goto done;
 383        fl->share = freq->flr_share;
 384        addr_type = ipv6_addr_type(&freq->flr_dst);
 385        if ((addr_type & IPV6_ADDR_MAPPED) ||
 386            addr_type == IPV6_ADDR_ANY) {
 387                err = -EINVAL;
 388                goto done;
 389        }
 390        fl->dst = freq->flr_dst;
 391        atomic_set(&fl->users, 1);
 392        switch (fl->share) {
 393        case IPV6_FL_S_EXCL:
 394        case IPV6_FL_S_ANY:
 395                break;
 396        case IPV6_FL_S_PROCESS:
 397                fl->owner = current->pid;
 398                break;
 399        case IPV6_FL_S_USER:
 400                fl->owner = current_euid();
 401                break;
 402        default:
 403                err = -EINVAL;
 404                goto done;
 405        }
 406        return fl;
 407
 408done:
 409        fl_free(fl);
 410        *err_p = err;
 411        return NULL;
 412}
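/*
 * fl_create() copies and parses the ancillary data that follows the
 * in6_flowlabel_req in optval: only options up to and including the routing
 * header are accepted (opt_flen must end up 0), the destination may be
 * neither mapped nor unspecified, and the owner pid or euid is recorded
 * according to the requested share mode.
 */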
 413
 414static int mem_check(struct sock *sk)
 415{
 416        struct ipv6_pinfo *np = inet6_sk(sk);
 417        struct ipv6_fl_socklist *sfl;
 418        int room = FL_MAX_SIZE - atomic_read(&fl_size);
 419        int count = 0;
 420
 421        if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
 422                return 0;
 423
 424        for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next)
 425                count++;
 426
 427        if (room <= 0 ||
 428            ((count >= FL_MAX_PER_SOCK ||
 429              (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
 430             !capable(CAP_NET_ADMIN)))
 431                return -ENOBUFS;
 432
 433        return 0;
 434}
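/*
 * mem_check() enforces the global FL_MAX_SIZE (4096) and per-socket
 * FL_MAX_PER_SOCK (32) limits; CAP_NET_ADMIN relaxes everything except a
 * completely full table.
 */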
 435
 436static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
 437{
 438        if (h1 == h2)
 439                return false;
 440        if (h1 == NULL || h2 == NULL)
 441                return true;
 442        if (h1->hdrlen != h2->hdrlen)
 443                return true;
 444        return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
 445}
 446
 447static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
 448{
 449        if (o1 == o2)
 450                return false;
 451        if (o1 == NULL || o2 == NULL)
 452                return true;
 453        if (o1->opt_nflen != o2->opt_nflen)
 454                return true;
 455        if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
 456                return true;
 457        if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
 458                return true;
 459        if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
 460                return true;
 461        return false;
 462}
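/*
 * ipv6_hdr_cmp()/ipv6_opt_cmp() return true when the two extension-header
 * sets differ; they are used below to ensure a shared label is only reused
 * with identical options.
 */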
 463
 464static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
 465                struct ip6_flowlabel *fl)
 466{
 467        write_lock_bh(&ip6_sk_fl_lock);
 468        sfl->fl = fl;
 469        sfl->next = np->ipv6_fl_list;
 470        np->ipv6_fl_list = sfl;
 471        write_unlock_bh(&ip6_sk_fl_lock);
 472}
 473
 474int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 475{
 476        int uninitialized_var(err);
 477        struct net *net = sock_net(sk);
 478        struct ipv6_pinfo *np = inet6_sk(sk);
 479        struct in6_flowlabel_req freq;
 480        struct ipv6_fl_socklist *sfl1=NULL;
 481        struct ipv6_fl_socklist *sfl, **sflp;
 482        struct ip6_flowlabel *fl, *fl1 = NULL;
 483
 484
 485        if (optlen < sizeof(freq))
 486                return -EINVAL;
 487
 488        if (copy_from_user(&freq, optval, sizeof(freq)))
 489                return -EFAULT;
 490
 491        switch (freq.flr_action) {
 492        case IPV6_FL_A_PUT:
 493                write_lock_bh(&ip6_sk_fl_lock);
 494                for (sflp = &np->ipv6_fl_list; (sfl=*sflp)!=NULL; sflp = &sfl->next) {
 495                        if (sfl->fl->label == freq.flr_label) {
 496                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
 497                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
 498                                *sflp = sfl->next;
 499                                write_unlock_bh(&ip6_sk_fl_lock);
 500                                fl_release(sfl->fl);
 501                                kfree(sfl);
 502                                return 0;
 503                        }
 504                }
 505                write_unlock_bh(&ip6_sk_fl_lock);
 506                return -ESRCH;
 507
 508        case IPV6_FL_A_RENEW:
 509                read_lock_bh(&ip6_sk_fl_lock);
 510                for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
 511                        if (sfl->fl->label == freq.flr_label) {
 512                                err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
 513                                read_unlock_bh(&ip6_sk_fl_lock);
 514                                return err;
 515                        }
 516                }
 517                read_unlock_bh(&ip6_sk_fl_lock);
 518
 519                if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
 520                        fl = fl_lookup(net, freq.flr_label);
 521                        if (fl) {
 522                                err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
 523                                fl_release(fl);
 524                                return err;
 525                        }
 526                }
 527                return -ESRCH;
 528
 529        case IPV6_FL_A_GET:
 530                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
 531                        return -EINVAL;
 532
 533                fl = fl_create(net, sk, &freq, optval, optlen, &err);
 534                if (fl == NULL)
 535                        return err;
 536                sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
 537
 538                if (freq.flr_label) {
 539                        err = -EEXIST;
 540                        read_lock_bh(&ip6_sk_fl_lock);
 541                        for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
 542                                if (sfl->fl->label == freq.flr_label) {
 543                                        if (freq.flr_flags&IPV6_FL_F_EXCL) {
 544                                                read_unlock_bh(&ip6_sk_fl_lock);
 545                                                goto done;
 546                                        }
 547                                        fl1 = sfl->fl;
 548                                        atomic_inc(&fl1->users);
 549                                        break;
 550                                }
 551                        }
 552                        read_unlock_bh(&ip6_sk_fl_lock);
 553
 554                        if (fl1 == NULL)
 555                                fl1 = fl_lookup(net, freq.flr_label);
 556                        if (fl1) {
 557recheck:
 558                                err = -EEXIST;
 559                                if (freq.flr_flags&IPV6_FL_F_EXCL)
 560                                        goto release;
 561                                err = -EPERM;
 562                                if (fl1->share == IPV6_FL_S_EXCL ||
 563                                    fl1->share != fl->share ||
 564                                    fl1->owner != fl->owner)
 565                                        goto release;
 566
 567                                err = -EINVAL;
 568                                if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
 569                                    ipv6_opt_cmp(fl1->opt, fl->opt))
 570                                        goto release;
 571
 572                                err = -ENOMEM;
 573                                if (sfl1 == NULL)
 574                                        goto release;
 575                                if (fl->linger > fl1->linger)
 576                                        fl1->linger = fl->linger;
 577                                if ((long)(fl->expires - fl1->expires) > 0)
 578                                        fl1->expires = fl->expires;
 579                                fl_link(np, sfl1, fl1);
 580                                fl_free(fl);
 581                                return 0;
 582
 583release:
 584                                fl_release(fl1);
 585                                goto done;
 586                        }
 587                }
 588                err = -ENOENT;
 589                if (!(freq.flr_flags&IPV6_FL_F_CREATE))
 590                        goto done;
 591
 592                err = -ENOMEM;
 593                if (sfl1 == NULL || (err = mem_check(sk)) != 0)
 594                        goto done;
 595
 596                fl1 = fl_intern(net, fl, freq.flr_label);
 597                if (fl1 != NULL)
 598                        goto recheck;
 599
 600                if (!freq.flr_label) {
 601                        if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
 602                                         &fl->label, sizeof(fl->label))) {
 603                                /* Intentionally ignore fault. */
 604                        }
 605                }
 606
 607                fl_link(np, sfl1, fl);
 608                return 0;
 609
 610        default:
 611                return -EINVAL;
 612        }
 613
 614done:
 615        fl_free(fl);
 616        kfree(sfl1);
 617        return err;
 618}
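/*
 * A minimal userspace sketch of driving this setsockopt() interface; the
 * file descriptor, destination and timeout values are illustrative
 * assumptions, while the structure, action and flag names come from
 * <linux/in6.h>:
 *
 *      struct in6_flowlabel_req freq;
 *
 *      memset(&freq, 0, sizeof(freq));
 *      freq.flr_action  = IPV6_FL_A_GET;
 *      freq.flr_flags   = IPV6_FL_F_CREATE;    // create if not yet attached
 *      freq.flr_label   = 0;                   // 0: let the kernel pick one
 *      freq.flr_share   = IPV6_FL_S_EXCL;
 *      freq.flr_linger  = 10;                  // seconds
 *      freq.flr_expires = 30;                  // seconds
 *      freq.flr_dst     = dst_addr;            // struct in6_addr of the peer
 *
 *      if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
 *                     &freq, sizeof(freq)) == 0) {
 *              // on success freq.flr_label holds the chosen label
 *      }
 *
 * IPV6_FL_A_RENEW and IPV6_FL_A_PUT use the same structure to extend or
 * drop the lease on a label already attached to the socket.
 */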
 619
 620#ifdef CONFIG_PROC_FS
 621
 622struct ip6fl_iter_state {
 623        struct seq_net_private p;
 624        int bucket;
 625};
 626
 627#define ip6fl_seq_private(seq)  ((struct ip6fl_iter_state *)(seq)->private)
 628
 629static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
 630{
 631        struct ip6_flowlabel *fl = NULL;
 632        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
 633        struct net *net = seq_file_net(seq);
 634
 635        for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
 636                fl = fl_ht[state->bucket];
 637
 638                while (fl && !net_eq(fl->fl_net, net))
 639                        fl = fl->next;
 640                if (fl)
 641                        break;
 642        }
 643        return fl;
 644}
 645
 646static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
 647{
 648        struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
 649        struct net *net = seq_file_net(seq);
 650
 651        fl = fl->next;
 652try_again:
 653        while (fl && !net_eq(fl->fl_net, net))
 654                fl = fl->next;
 655
 656        while (!fl) {
 657                if (++state->bucket <= FL_HASH_MASK) {
 658                        fl = fl_ht[state->bucket];
 659                        goto try_again;
 660                } else
 661                        break;
 662        }
 663        return fl;
 664}
 665
 666static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
 667{
 668        struct ip6_flowlabel *fl = ip6fl_get_first(seq);
 669        if (fl)
 670                while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
 671                        --pos;
 672        return pos ? NULL : fl;
 673}
 674
 675static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
 676        __acquires(ip6_fl_lock)
 677{
 678        read_lock_bh(&ip6_fl_lock);
 679        return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 680}
 681
 682static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 683{
 684        struct ip6_flowlabel *fl;
 685
 686        if (v == SEQ_START_TOKEN)
 687                fl = ip6fl_get_first(seq);
 688        else
 689                fl = ip6fl_get_next(seq, v);
 690        ++*pos;
 691        return fl;
 692}
 693
 694static void ip6fl_seq_stop(struct seq_file *seq, void *v)
 695        __releases(ip6_fl_lock)
 696{
 697        read_unlock_bh(&ip6_fl_lock);
 698}
 699
 700static int ip6fl_seq_show(struct seq_file *seq, void *v)
 701{
 702        if (v == SEQ_START_TOKEN)
 703                seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
 704                           "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
 705        else {
 706                struct ip6_flowlabel *fl = v;
 707                seq_printf(seq,
 708                           "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
 709                           (unsigned int)ntohl(fl->label),
 710                           fl->share,
 711                           (int)fl->owner,
 712                           atomic_read(&fl->users),
 713                           fl->linger/HZ,
 714                           (long)(fl->expires - jiffies)/HZ,
 715                           &fl->dst,
 716                           fl->opt ? fl->opt->opt_nflen : 0);
 717        }
 718        return 0;
 719}
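/*
 * An illustrative /proc/net/ip6_flowlabel line as produced above (the values
 * and exact spacing are made up):
 *
 *   Label S Owner  Users  Linger Expires  Dst                              Opt
 *   1234A 1 1000   1      10     29       20010db8000000000000000000000001 0
 */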
 720
 721static const struct seq_operations ip6fl_seq_ops = {
 722        .start  =       ip6fl_seq_start,
 723        .next   =       ip6fl_seq_next,
 724        .stop   =       ip6fl_seq_stop,
 725        .show   =       ip6fl_seq_show,
 726};
 727
 728static int ip6fl_seq_open(struct inode *inode, struct file *file)
 729{
 730        return seq_open_net(inode, file, &ip6fl_seq_ops,
 731                            sizeof(struct ip6fl_iter_state));
 732}
 733
 734static const struct file_operations ip6fl_seq_fops = {
 735        .owner          =       THIS_MODULE,
 736        .open           =       ip6fl_seq_open,
 737        .read           =       seq_read,
 738        .llseek         =       seq_lseek,
 739        .release        =       seq_release_net,
 740};
 741
 742static int __net_init ip6_flowlabel_proc_init(struct net *net)
 743{
 744        if (!proc_net_fops_create(net, "ip6_flowlabel",
 745                                  S_IRUGO, &ip6fl_seq_fops))
 746                return -ENOMEM;
 747        return 0;
 748}
 749
 750static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
 751{
 752        proc_net_remove(net, "ip6_flowlabel");
 753}
 754#else
 755static inline int ip6_flowlabel_proc_init(struct net *net)
 756{
 757        return 0;
 758}
 759static inline void ip6_flowlabel_proc_fini(struct net *net)
 760{
 761}
 762#endif
 763
 764static void __net_exit ip6_flowlabel_net_exit(struct net *net)
 765{
 766        ip6_fl_purge(net);
 767        ip6_flowlabel_proc_fini(net);
 768}
 769
 770static struct pernet_operations ip6_flowlabel_net_ops = {
 771        .init = ip6_flowlabel_proc_init,
 772        .exit = ip6_flowlabel_net_exit,
 773};
 774
 775int ip6_flowlabel_init(void)
 776{
 777        return register_pernet_subsys(&ip6_flowlabel_net_ops);
 778}
 779
 780void ip6_flowlabel_cleanup(void)
 781{
 782        del_timer(&ip6_fl_gc_timer);
 783        unregister_pernet_subsys(&ip6_flowlabel_net_ops);
 784}
 785