linux/net/netfilter/nf_conntrack_expect.c
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd))
		init_nf_conntrack_hash_rnd();

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
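
/* The multiply-shift above maps the full 32-bit jhash value uniformly
 * onto [0, nf_ct_expect_hsize) without a modulo operation: e.g. with
 * hsize == 256, hash == 0x80000000 selects bucket
 * (0x80000000ULL * 256) >> 32 == 128.
 */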

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
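	/* The lookup is protected only by RCU: a concurrent
	 * nf_ct_expect_put() may already have dropped the last
	 * reference (freeing is deferred to an RCU grace period), in
	 * which case atomic_inc_not_zero() fails and the entry must
	 * not be used.
	 */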
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (i.e. the packet
	   hasn't left this machine yet), how could the other end know
	   about the expectation?  Hence these are not the droids you
	   are looking for (if the master ct never got confirmed, we'd
	   hold a reference to it and weird things would happen to
	   future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

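	/* On success the caller inherits a reference: a PERMANENT
	 * expectation takes an extra one explicitly, while for a
	 * one-shot expectation a successful del_timer() hands the
	 * timer's reference over to the caller (nf_ct_unlink_expect()
	 * drops only the hash table's reference).
	 */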
	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The expectations clash if their tuples are equal in the part
	   covered by the intersection of both masks. */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
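
/* Example: an expectation that wildcards the source address (its
 * mask.src.u3 is all zero) intersects any other mask to one that
 * ignores the source address entirely, so it clashes with every
 * expectation whose remaining tuple fields match.
 */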

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
		nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
		nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
		nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
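
/* Typical lifecycle, as a minimal sketch of what a connection tracking
 * helper might do after parsing the address/port of a related data
 * channel out of a control connection. The expectation class, tuple
 * fields and names here are illustrative assumptions: ct is the master
 * conntrack seen by the helper, port the parsed __be16 destination
 * port, ret the hook verdict.
 *
 *	struct nf_conntrack_expect *exp;
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 *
 * The allocation reference is always dropped with nf_ct_expect_put();
 * nf_ct_expect_related() takes its own references on insertion.
 */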

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references: one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

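	/* Expectations are prepended with hlist_add_head(), so the last
	 * matching entry on the per-master list is the oldest one. */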
	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

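/* Returns 1 if the expectation may be inserted, or a negative errno:
 * -ESHUTDOWN if the master has no helper extension (it is going away),
 * -EBUSY if the new expectation clashes with an existing one,
 * -EMFILE if a per-helper limit or the global expectation limit is hit.
 */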
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
						>= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}
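
/* A resulting /proc/net/nf_conntrack_expect line looks roughly like
 * (illustrative values only):
 *
 *	297 l3proto = 2 proto=6 src=0.0.0.0 dst=192.168.0.2 sport=0 dport=32770 ftp
 *
 * i.e. seconds until the expectation times out, layer 3/4 protocol
 * numbers, the (possibly wildcarded) expected tuple, any flags, and
 * finally the helper name.
 */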

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}

int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
				sizeof(struct nf_conntrack_expect),
				0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}
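
/* Sizing example: unless expect_hashsize was set on the command line,
 * a conntrack hash of, say, 16384 buckets yields
 * nf_ct_expect_hsize = 16384 / 256 = 64 and a global cap of
 * nf_ct_expect_max = 64 * 4 = 256 pending expectations.
 */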

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}