linux/net/netfilter/nf_conntrack_core.c
   1/* Connection state tracking for netfilter.  This is separated from,
   2   but required by, the NAT layer; it can also be used by an iptables
   3   extension. */
   4
   5/* (C) 1999-2001 Paul `Rusty' Russell
   6 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
   7 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#include <linux/types.h>
  15#include <linux/netfilter.h>
  16#include <linux/module.h>
  17#include <linux/skbuff.h>
  18#include <linux/proc_fs.h>
  19#include <linux/vmalloc.h>
  20#include <linux/stddef.h>
  21#include <linux/slab.h>
  22#include <linux/random.h>
  23#include <linux/jhash.h>
  24#include <linux/err.h>
  25#include <linux/percpu.h>
  26#include <linux/moduleparam.h>
  27#include <linux/notifier.h>
  28#include <linux/kernel.h>
  29#include <linux/netdevice.h>
  30#include <linux/socket.h>
  31#include <linux/mm.h>
  32#include <linux/rculist_nulls.h>
  33
  34#include <net/netfilter/nf_conntrack.h>
  35#include <net/netfilter/nf_conntrack_l3proto.h>
  36#include <net/netfilter/nf_conntrack_l4proto.h>
  37#include <net/netfilter/nf_conntrack_expect.h>
  38#include <net/netfilter/nf_conntrack_helper.h>
  39#include <net/netfilter/nf_conntrack_core.h>
  40#include <net/netfilter/nf_conntrack_extend.h>
  41#include <net/netfilter/nf_conntrack_acct.h>
  42#include <net/netfilter/nf_nat.h>
  43#include <net/netfilter/nf_nat_core.h>
  44
  45#define NF_CONNTRACK_VERSION    "0.5.0"
  46
  47int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
  48                                      enum nf_nat_manip_type manip,
  49                                      struct nlattr *attr) __read_mostly;
  50EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
  51
  52DEFINE_SPINLOCK(nf_conntrack_lock);
  53EXPORT_SYMBOL_GPL(nf_conntrack_lock);
  54
  55unsigned int nf_conntrack_htable_size __read_mostly;
  56EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
  57
  58unsigned int nf_conntrack_max __read_mostly;
  59EXPORT_SYMBOL_GPL(nf_conntrack_max);
  60
  61struct nf_conn nf_conntrack_untracked __read_mostly;
  62EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
  63
  64static struct kmem_cache *nf_conntrack_cachep __read_mostly;
  65
  66static int nf_conntrack_hash_rnd_initted;
  67static unsigned int nf_conntrack_hash_rnd;
  68
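    /* Map a tuple to a hash bucket: jhash the direction-independent part of
     * the tuple with the per-boot random seed, then scale the 32-bit result
     * into [0, size) with a multiply-and-shift. */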
  69static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
  70                                  unsigned int size, unsigned int rnd)
  71{
  72        unsigned int n;
  73        u_int32_t h;
  74
  75        /* The direction must be ignored, so we hash everything up to the
  76         * destination ports (which is a multiple of 4) and treat the last
  77         * three bytes manually.
  78         */
  79        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
  80        h = jhash2((u32 *)tuple, n,
  81                   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
  82                          tuple->dst.protonum));
  83
  84        return ((u64)h * size) >> 32;
  85}
  86
  87static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
  88{
  89        return __hash_conntrack(tuple, nf_conntrack_htable_size,
  90                                nf_conntrack_hash_rnd);
  91}
  92
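    /* Extract a conntrack tuple from a packet: the l3proto fills in the
     * addresses from the network header at nhoff, the l4proto fills in the
     * ports (or equivalent) from the transport header at dataoff.  Returns
     * false if either protocol handler cannot parse the packet. */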
  93bool
  94nf_ct_get_tuple(const struct sk_buff *skb,
  95                unsigned int nhoff,
  96                unsigned int dataoff,
  97                u_int16_t l3num,
  98                u_int8_t protonum,
  99                struct nf_conntrack_tuple *tuple,
 100                const struct nf_conntrack_l3proto *l3proto,
 101                const struct nf_conntrack_l4proto *l4proto)
 102{
 103        memset(tuple, 0, sizeof(*tuple));
 104
 105        tuple->src.l3num = l3num;
 106        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
 107                return false;
 108
 109        tuple->dst.protonum = protonum;
 110        tuple->dst.dir = IP_CT_DIR_ORIGINAL;
 111
 112        return l4proto->pkt_to_tuple(skb, dataoff, tuple);
 113}
 114EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
 115
 116bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
 117                       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
 118{
 119        struct nf_conntrack_l3proto *l3proto;
 120        struct nf_conntrack_l4proto *l4proto;
 121        unsigned int protoff;
 122        u_int8_t protonum;
 123        int ret;
 124
 125        rcu_read_lock();
 126
 127        l3proto = __nf_ct_l3proto_find(l3num);
 128        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
 129        if (ret != NF_ACCEPT) {
 130                rcu_read_unlock();
 131                return false;
 132        }
 133
 134        l4proto = __nf_ct_l4proto_find(l3num, protonum);
 135
 136        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
 137                              l3proto, l4proto);
 138
 139        rcu_read_unlock();
 140        return ret;
 141}
 142EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
 143
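    /* Build the reply-direction tuple for orig by letting the l3 and l4
     * protocol handlers swap source and destination. */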
 144bool
 145nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
 146                   const struct nf_conntrack_tuple *orig,
 147                   const struct nf_conntrack_l3proto *l3proto,
 148                   const struct nf_conntrack_l4proto *l4proto)
 149{
 150        memset(inverse, 0, sizeof(*inverse));
 151
 152        inverse->src.l3num = orig->src.l3num;
 153        if (l3proto->invert_tuple(inverse, orig) == 0)
 154                return false;
 155
 156        inverse->dst.dir = !orig->dst.dir;
 157
 158        inverse->dst.protonum = orig->dst.protonum;
 159        return l4proto->invert_tuple(inverse, orig);
 160}
 161EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 162
 163static void
 164clean_from_lists(struct nf_conn *ct)
 165{
 166        pr_debug("clean_from_lists(%p)\n", ct);
 167        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 168        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
 169
 170        /* Destroy all pending expectations */
 171        nf_ct_remove_expectations(ct);
 172}
 173
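    /* Destroy a conntrack whose last reference has been dropped: run the
     * l4proto destructor, remove any remaining expectations, unlink it from
     * the unconfirmed list if it was never confirmed, and free it. */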
 174static void
 175destroy_conntrack(struct nf_conntrack *nfct)
 176{
 177        struct nf_conn *ct = (struct nf_conn *)nfct;
 178        struct net *net = nf_ct_net(ct);
 179        struct nf_conntrack_l4proto *l4proto;
 180
 181        pr_debug("destroy_conntrack(%p)\n", ct);
 182        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 183        NF_CT_ASSERT(!timer_pending(&ct->timeout));
 184
 185        if (!test_bit(IPS_DYING_BIT, &ct->status))
 186                nf_conntrack_event(IPCT_DESTROY, ct);
 187        set_bit(IPS_DYING_BIT, &ct->status);
 188
 189        /* To make sure we don't get any weird locking issues here:
 190         * destroy_conntrack() MUST NOT be called with a write lock
 191         * to nf_conntrack_lock!!! -HW */
 192        rcu_read_lock();
 193        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
 194        if (l4proto && l4proto->destroy)
 195                l4proto->destroy(ct);
 196
 197        rcu_read_unlock();
 198
 199        spin_lock_bh(&nf_conntrack_lock);
 200        /* Expectations will have been removed in clean_from_lists,
 201         * except TFTP can create an expectation on the first packet,
 202         * before connection is in the list, so we need to clean here,
 203         * too. */
 204        nf_ct_remove_expectations(ct);
 205
 206        /* We overload the first tuple to link into the unconfirmed list. */
 207        if (!nf_ct_is_confirmed(ct)) {
 208                BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
 209                hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 210        }
 211
 212        NF_CT_STAT_INC(net, delete);
 213        spin_unlock_bh(&nf_conntrack_lock);
 214
 215        if (ct->master)
 216                nf_ct_put(ct->master);
 217
 218        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
 219        nf_conntrack_free(ct);
 220}
 221
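    /* Timeout handler (also invoked directly when a conntrack is killed
     * early): let the helper's destroy hook run, unlink the entry from the
     * hash table and its expectations, then drop the timer's reference. */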
 222static void death_by_timeout(unsigned long ul_conntrack)
 223{
 224        struct nf_conn *ct = (void *)ul_conntrack;
 225        struct net *net = nf_ct_net(ct);
 226        struct nf_conn_help *help = nfct_help(ct);
 227        struct nf_conntrack_helper *helper;
 228
 229        if (help) {
 230                rcu_read_lock();
 231                helper = rcu_dereference(help->helper);
 232                if (helper && helper->destroy)
 233                        helper->destroy(ct);
 234                rcu_read_unlock();
 235        }
 236
 237        spin_lock_bh(&nf_conntrack_lock);
 238        /* Inside lock so preempt is disabled on module removal path.
 239         * Otherwise we can get spurious warnings. */
 240        NF_CT_STAT_INC(net, delete_list);
 241        clean_from_lists(ct);
 242        spin_unlock_bh(&nf_conntrack_lock);
 243        nf_ct_put(ct);
 244}
 245
 246/*
 247 * Warning:
 248 * - Caller must take a reference on returned object
 249 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 250 * OR
 251 * - Caller must lock nf_conntrack_lock before calling this function
 252 */
 253struct nf_conntrack_tuple_hash *
 254__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
 255{
 256        struct nf_conntrack_tuple_hash *h;
 257        struct hlist_nulls_node *n;
 258        unsigned int hash = hash_conntrack(tuple);
 259
 260        /* Disable BHs the entire time since we normally need to disable them
 261         * at least once for the stats anyway.
 262         */
 263        local_bh_disable();
 264begin:
 265        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
 266                if (nf_ct_tuple_equal(tuple, &h->tuple)) {
 267                        NF_CT_STAT_INC(net, found);
 268                        local_bh_enable();
 269                        return h;
 270                }
 271                NF_CT_STAT_INC(net, searched);
 272        }
 273        /*
 274         * if the nulls value we got at the end of this lookup is
 275         * not the expected one, we must restart lookup.
 276         * We probably met an item that was moved to another chain.
 277         */
 278        if (get_nulls_value(n) != hash)
 279                goto begin;
 280        local_bh_enable();
 281
 282        return NULL;
 283}
 284EXPORT_SYMBOL_GPL(__nf_conntrack_find);
 285
 286/* Find a connection corresponding to a tuple. */
 287struct nf_conntrack_tuple_hash *
 288nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
 289{
 290        struct nf_conntrack_tuple_hash *h;
 291        struct nf_conn *ct;
 292
 293        rcu_read_lock();
 294begin:
 295        h = __nf_conntrack_find(net, tuple);
 296        if (h) {
 297                ct = nf_ct_tuplehash_to_ctrack(h);
 298                if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
 299                        h = NULL;
 300                else {
 301                        if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
 302                                nf_ct_put(ct);
 303                                goto begin;
 304                        }
 305                }
 306        }
 307        rcu_read_unlock();
 308
 309        return h;
 310}
 311EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 312
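    /* Link both directions of a conntrack into their hash buckets. */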
 313static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 314                                       unsigned int hash,
 315                                       unsigned int repl_hash)
 316{
 317        struct net *net = nf_ct_net(ct);
 318
 319        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 320                           &net->ct.hash[hash]);
 321        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 322                           &net->ct.hash[repl_hash]);
 323}
 324
 325void nf_conntrack_hash_insert(struct nf_conn *ct)
 326{
 327        unsigned int hash, repl_hash;
 328
 329        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 330        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 331
 332        __nf_conntrack_hash_insert(ct, hash, repl_hash);
 333}
 334EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
 335
 336/* Confirm a connection given skb; places it in hash table */
 337int
 338__nf_conntrack_confirm(struct sk_buff *skb)
 339{
 340        unsigned int hash, repl_hash;
 341        struct nf_conntrack_tuple_hash *h;
 342        struct nf_conn *ct;
 343        struct nf_conn_help *help;
 344        struct hlist_nulls_node *n;
 345        enum ip_conntrack_info ctinfo;
 346        struct net *net;
 347
 348        ct = nf_ct_get(skb, &ctinfo);
 349        net = nf_ct_net(ct);
 350
 351        /* ipt_REJECT uses nf_conntrack_attach to attach related
 352           ICMP/TCP RST packets in the other direction.  The actual
 353           packet which created the connection will be IP_CT_NEW or,
 354           for an expected connection, IP_CT_RELATED. */
 355        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
 356                return NF_ACCEPT;
 357
 358        hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 359        repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 360
 361        /* We're not in hash table, and we refuse to set up related
 362           connections for unconfirmed conns.  But packet copies and
 363           REJECT will give spurious warnings here. */
 364        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
 365
 366        /* No external references means no one else could have
 367           confirmed us. */
 368        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 369        pr_debug("Confirming conntrack %p\n", ct);
 370
 371        spin_lock_bh(&nf_conntrack_lock);
 372
 373        /* See if there's one in the list already, including reverse:
 374           NAT could have grabbed it without realizing, since we're
 375           not in the hash.  If there is, we lost the race. */
 376        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 377                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 378                                      &h->tuple))
 379                        goto out;
 380        hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
 381                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 382                                      &h->tuple))
 383                        goto out;
 384
 385        /* Remove from unconfirmed list */
 386        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 387
 388        __nf_conntrack_hash_insert(ct, hash, repl_hash);
 389        /* Timer relative to confirmation time, not original
 390           setting time, otherwise we'd get timer wrap in
 391           weird delay cases. */
 392        ct->timeout.expires += jiffies;
 393        add_timer(&ct->timeout);
 394        atomic_inc(&ct->ct_general.use);
 395        set_bit(IPS_CONFIRMED_BIT, &ct->status);
 396        NF_CT_STAT_INC(net, insert);
 397        spin_unlock_bh(&nf_conntrack_lock);
 398        help = nfct_help(ct);
 399        if (help && help->helper)
 400                nf_conntrack_event_cache(IPCT_HELPER, ct);
 401#ifdef CONFIG_NF_NAT_NEEDED
 402        if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
 403            test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
 404                nf_conntrack_event_cache(IPCT_NATINFO, ct);
 405#endif
 406        nf_conntrack_event_cache(master_ct(ct) ?
 407                                 IPCT_RELATED : IPCT_NEW, ct);
 408        return NF_ACCEPT;
 409
 410out:
 411        NF_CT_STAT_INC(net, insert_failed);
 412        spin_unlock_bh(&nf_conntrack_lock);
 413        return NF_DROP;
 414}
 415EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
 416
 417/* Returns true if a connection corresponds to the tuple (required
 418   for NAT). */
 419int
 420nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 421                         const struct nf_conn *ignored_conntrack)
 422{
 423        struct net *net = nf_ct_net(ignored_conntrack);
 424        struct nf_conntrack_tuple_hash *h;
 425        struct hlist_nulls_node *n;
 426        unsigned int hash = hash_conntrack(tuple);
 427
 428        /* Disable BHs the entire time since we need to disable them at
 429         * least once for the stats anyway.
 430         */
 431        rcu_read_lock_bh();
 432        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
 433                if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 434                    nf_ct_tuple_equal(tuple, &h->tuple)) {
 435                        NF_CT_STAT_INC(net, found);
 436                        rcu_read_unlock_bh();
 437                        return 1;
 438                }
 439                NF_CT_STAT_INC(net, searched);
 440        }
 441        rcu_read_unlock_bh();
 442
 443        return 0;
 444}
 445EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 446
 447#define NF_CT_EVICTION_RANGE    8
 448
 449/* There's a small race here where we may free a just-assured
 450   connection.  Too bad: we're in trouble anyway. */
 451static noinline int early_drop(struct net *net, unsigned int hash)
 452{
 453        /* Use oldest entry, which is roughly LRU */
 454        struct nf_conntrack_tuple_hash *h;
 455        struct nf_conn *ct = NULL, *tmp;
 456        struct hlist_nulls_node *n;
 457        unsigned int i, cnt = 0;
 458        int dropped = 0;
 459
 460        rcu_read_lock();
 461        for (i = 0; i < nf_conntrack_htable_size; i++) {
 462                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
 463                                         hnnode) {
 464                        tmp = nf_ct_tuplehash_to_ctrack(h);
 465                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
 466                                ct = tmp;
 467                        cnt++;
 468                }
 469
 470                if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
 471                        ct = NULL;
 472                if (ct || cnt >= NF_CT_EVICTION_RANGE)
 473                        break;
 474                hash = (hash + 1) % nf_conntrack_htable_size;
 475        }
 476        rcu_read_unlock();
 477
 478        if (!ct)
 479                return dropped;
 480
 481        if (del_timer(&ct->timeout)) {
 482                death_by_timeout((unsigned long)ct);
 483                dropped = 1;
 484                NF_CT_STAT_INC_ATOMIC(net, early_drop);
 485        }
 486        nf_ct_put(ct);
 487        return dropped;
 488}
 489
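    /* Allocate a conntrack entry with the given original and reply tuples.
     * Initializes the hash seed on first use and enforces nf_conntrack_max
     * by trying to evict an unassured entry; returns ERR_PTR(-ENOMEM) when
     * the table is full or the slab allocation fails. */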
 490struct nf_conn *nf_conntrack_alloc(struct net *net,
 491                                   const struct nf_conntrack_tuple *orig,
 492                                   const struct nf_conntrack_tuple *repl,
 493                                   gfp_t gfp)
 494{
 495        struct nf_conn *ct;
 496
 497        if (unlikely(!nf_conntrack_hash_rnd_initted)) {
 498                get_random_bytes(&nf_conntrack_hash_rnd,
 499                                sizeof(nf_conntrack_hash_rnd));
 500                nf_conntrack_hash_rnd_initted = 1;
 501        }
 502
 503        /* We don't want any race condition at early drop stage */
 504        atomic_inc(&net->ct.count);
 505
 506        if (nf_conntrack_max &&
 507            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
 508                unsigned int hash = hash_conntrack(orig);
 509                if (!early_drop(net, hash)) {
 510                        atomic_dec(&net->ct.count);
 511                        if (net_ratelimit())
 512                                printk(KERN_WARNING
 513                                       "nf_conntrack: table full, dropping"
 514                                       " packet.\n");
 515                        return ERR_PTR(-ENOMEM);
 516                }
 517        }
 518
 519        ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
 520        if (ct == NULL) {
 521                pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
 522                atomic_dec(&net->ct.count);
 523                return ERR_PTR(-ENOMEM);
 524        }
 525
 526        atomic_set(&ct->ct_general.use, 1);
 527        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 528        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
 529        /* Don't set timer yet: wait for confirmation */
 530        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 531#ifdef CONFIG_NET_NS
 532        ct->ct_net = net;
 533#endif
 534
 535        return ct;
 536}
 537EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
 538
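    /* Release a conntrack entry: destroy and free its extensions, drop the
     * per-netns count and return the object to the slab cache. */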
 539void nf_conntrack_free(struct nf_conn *ct)
 540{
 541        struct net *net = nf_ct_net(ct);
 542
 543        nf_ct_ext_destroy(ct);
 544        atomic_dec(&net->ct.count);
 545        nf_ct_ext_free(ct);
 546        kmem_cache_free(nf_conntrack_cachep, ct);
 547}
 548EXPORT_SYMBOL_GPL(nf_conntrack_free);
 549
 550/* Allocate a new conntrack: we return -ENOMEM if classification
 551   failed due to stress.  Otherwise it really is unclassifiable. */
 552static struct nf_conntrack_tuple_hash *
 553init_conntrack(struct net *net,
 554               const struct nf_conntrack_tuple *tuple,
 555               struct nf_conntrack_l3proto *l3proto,
 556               struct nf_conntrack_l4proto *l4proto,
 557               struct sk_buff *skb,
 558               unsigned int dataoff)
 559{
 560        struct nf_conn *ct;
 561        struct nf_conn_help *help;
 562        struct nf_conntrack_tuple repl_tuple;
 563        struct nf_conntrack_expect *exp;
 564
 565        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
 566                pr_debug("Can't invert tuple.\n");
 567                return NULL;
 568        }
 569
 570        ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
 571        if (IS_ERR(ct)) {
 572                pr_debug("Can't allocate conntrack.\n");
 573                return (struct nf_conntrack_tuple_hash *)ct;
 574        }
 575
 576        if (!l4proto->new(ct, skb, dataoff)) {
 577                nf_conntrack_free(ct);
 578                pr_debug("init conntrack: can't track with proto module\n");
 579                return NULL;
 580        }
 581
 582        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
 583
 584        spin_lock_bh(&nf_conntrack_lock);
 585        exp = nf_ct_find_expectation(net, tuple);
 586        if (exp) {
 587                pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
 588                         ct, exp);
 589                /* Welcome, Mr. Bond.  We've been expecting you... */
 590                __set_bit(IPS_EXPECTED_BIT, &ct->status);
 591                ct->master = exp->master;
 592                if (exp->helper) {
 593                        help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
 594                        if (help)
 595                                rcu_assign_pointer(help->helper, exp->helper);
 596                }
 597
 598#ifdef CONFIG_NF_CONNTRACK_MARK
 599                ct->mark = exp->master->mark;
 600#endif
 601#ifdef CONFIG_NF_CONNTRACK_SECMARK
 602                ct->secmark = exp->master->secmark;
 603#endif
 604                nf_conntrack_get(&ct->master->ct_general);
 605                NF_CT_STAT_INC(net, expect_new);
 606        } else {
 607                __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
 608                NF_CT_STAT_INC(net, new);
 609        }
 610
 611        /* Overload the tuple linked list to put us in the unconfirmed list. */
 612        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 613                       &net->ct.unconfirmed);
 614
 615        spin_unlock_bh(&nf_conntrack_lock);
 616
 617        if (exp) {
 618                if (exp->expectfn)
 619                        exp->expectfn(ct, exp);
 620                nf_ct_expect_put(exp);
 621        }
 622
 623        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
 624}
 625
 626/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
 627static inline struct nf_conn *
 628resolve_normal_ct(struct net *net,
 629                  struct sk_buff *skb,
 630                  unsigned int dataoff,
 631                  u_int16_t l3num,
 632                  u_int8_t protonum,
 633                  struct nf_conntrack_l3proto *l3proto,
 634                  struct nf_conntrack_l4proto *l4proto,
 635                  int *set_reply,
 636                  enum ip_conntrack_info *ctinfo)
 637{
 638        struct nf_conntrack_tuple tuple;
 639        struct nf_conntrack_tuple_hash *h;
 640        struct nf_conn *ct;
 641
 642        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
 643                             dataoff, l3num, protonum, &tuple, l3proto,
 644                             l4proto)) {
 645                pr_debug("resolve_normal_ct: Can't get tuple\n");
 646                return NULL;
 647        }
 648
 649        /* look for tuple match */
 650        h = nf_conntrack_find_get(net, &tuple);
 651        if (!h) {
 652                h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
 653                if (!h)
 654                        return NULL;
 655                if (IS_ERR(h))
 656                        return (void *)h;
 657        }
 658        ct = nf_ct_tuplehash_to_ctrack(h);
 659
 660        /* It exists; we have (non-exclusive) reference. */
 661        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
 662                *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
 663                /* Please set the reply bit if this packet is OK */
 664                *set_reply = 1;
 665        } else {
 666                /* Once we've had two way comms, always ESTABLISHED. */
 667                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
 668                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
 669                        *ctinfo = IP_CT_ESTABLISHED;
 670                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
 671                        pr_debug("nf_conntrack_in: related packet for %p\n",
 672                                 ct);
 673                        *ctinfo = IP_CT_RELATED;
 674                } else {
 675                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
 676                        *ctinfo = IP_CT_NEW;
 677                }
 678                *set_reply = 0;
 679        }
 680        skb->nfct = &ct->ct_general;
 681        skb->nfctinfo = *ctinfo;
 682        return ct;
 683}
 684
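    /* Netfilter hook entry point: look up (or create) the conntrack for this
     * packet, let the l4 protocol tracker update its state and return an
     * NF_* verdict.  skb->nfct and skb->nfctinfo are set on success. */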
 685unsigned int
 686nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 687                struct sk_buff *skb)
 688{
 689        struct nf_conn *ct;
 690        enum ip_conntrack_info ctinfo;
 691        struct nf_conntrack_l3proto *l3proto;
 692        struct nf_conntrack_l4proto *l4proto;
 693        unsigned int dataoff;
 694        u_int8_t protonum;
 695        int set_reply = 0;
 696        int ret;
 697
 698        /* Previously seen (loopback or untracked)?  Ignore. */
 699        if (skb->nfct) {
 700                NF_CT_STAT_INC_ATOMIC(net, ignore);
 701                return NF_ACCEPT;
 702        }
 703
 704        /* rcu_read_lock()ed by nf_hook_slow */
 705        l3proto = __nf_ct_l3proto_find(pf);
 706        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
 707                                   &dataoff, &protonum);
 708        if (ret <= 0) {
 709                pr_debug("not prepared to track yet or error occurred\n");
 710                NF_CT_STAT_INC_ATOMIC(net, error);
 711                NF_CT_STAT_INC_ATOMIC(net, invalid);
 712                return -ret;
 713        }
 714
 715        l4proto = __nf_ct_l4proto_find(pf, protonum);
 716
 717        /* It may be a special packet, error, unclean...
 718         * the inverse of the return code tells the netfilter
 719         * core what to do with the packet. */
 720        if (l4proto->error != NULL) {
 721                ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
 722                if (ret <= 0) {
 723                        NF_CT_STAT_INC_ATOMIC(net, error);
 724                        NF_CT_STAT_INC_ATOMIC(net, invalid);
 725                        return -ret;
 726                }
 727        }
 728
 729        ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
 730                               l3proto, l4proto, &set_reply, &ctinfo);
 731        if (!ct) {
 732                /* Not valid part of a connection */
 733                NF_CT_STAT_INC_ATOMIC(net, invalid);
 734                return NF_ACCEPT;
 735        }
 736
 737        if (IS_ERR(ct)) {
 738                /* Too stressed to deal. */
 739                NF_CT_STAT_INC_ATOMIC(net, drop);
 740                return NF_DROP;
 741        }
 742
 743        NF_CT_ASSERT(skb->nfct);
 744
 745        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
 746        if (ret <= 0) {
 747                /* Invalid: inverse of the return code tells
 748                 * the netfilter core what to do */
 749                pr_debug("nf_conntrack_in: Can't track with proto module\n");
 750                nf_conntrack_put(skb->nfct);
 751                skb->nfct = NULL;
 752                NF_CT_STAT_INC_ATOMIC(net, invalid);
 753                if (ret == -NF_DROP)
 754                        NF_CT_STAT_INC_ATOMIC(net, drop);
 755                return -ret;
 756        }
 757
 758        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
 759                nf_conntrack_event_cache(IPCT_STATUS, ct);
 760
 761        return ret;
 762}
 763EXPORT_SYMBOL_GPL(nf_conntrack_in);
 764
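    /* Like nf_ct_invert_tuple(), but looks up the l3/l4 protocol handlers
     * itself under rcu_read_lock(). */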
 765bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
 766                          const struct nf_conntrack_tuple *orig)
 767{
 768        bool ret;
 769
 770        rcu_read_lock();
 771        ret = nf_ct_invert_tuple(inverse, orig,
 772                                 __nf_ct_l3proto_find(orig->src.l3num),
 773                                 __nf_ct_l4proto_find(orig->src.l3num,
 774                                                      orig->dst.protonum));
 775        rcu_read_unlock();
 776        return ret;
 777}
 778EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
 779
 780/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
 781   implicitly racy: see __nf_conntrack_confirm */
 782void nf_conntrack_alter_reply(struct nf_conn *ct,
 783                              const struct nf_conntrack_tuple *newreply)
 784{
 785        struct nf_conn_help *help = nfct_help(ct);
 786
 787        /* Should be unconfirmed, so not in hash table yet */
 788        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 789
 790        pr_debug("Altering reply tuple of %p to ", ct);
 791        nf_ct_dump_tuple(newreply);
 792
 793        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
 794        if (ct->master || (help && !hlist_empty(&help->expectations)))
 795                return;
 796
 797        rcu_read_lock();
 798        __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
 799        rcu_read_unlock();
 800}
 801EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
 802
 803/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
 804void __nf_ct_refresh_acct(struct nf_conn *ct,
 805                          enum ip_conntrack_info ctinfo,
 806                          const struct sk_buff *skb,
 807                          unsigned long extra_jiffies,
 808                          int do_acct)
 809{
 810        int event = 0;
 811
 812        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
 813        NF_CT_ASSERT(skb);
 814
 815        spin_lock_bh(&nf_conntrack_lock);
 816
 817        /* Only update if this is not a fixed timeout */
 818        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
 819                goto acct;
 820
 821        /* If not in hash table, timer will not be active yet */
 822        if (!nf_ct_is_confirmed(ct)) {
 823                ct->timeout.expires = extra_jiffies;
 824                event = IPCT_REFRESH;
 825        } else {
 826                unsigned long newtime = jiffies + extra_jiffies;
 827
 828                /* Only update the timeout if the new timeout is at least
 829                   HZ jiffies from the old timeout. Need del_timer for race
 830                   avoidance (may already be dying). */
 831                if (newtime - ct->timeout.expires >= HZ
 832                    && del_timer(&ct->timeout)) {
 833                        ct->timeout.expires = newtime;
 834                        add_timer(&ct->timeout);
 835                        event = IPCT_REFRESH;
 836                }
 837        }
 838
 839acct:
 840        if (do_acct) {
 841                struct nf_conn_counter *acct;
 842
 843                acct = nf_conn_acct_find(ct);
 844                if (acct) {
 845                        acct[CTINFO2DIR(ctinfo)].packets++;
 846                        acct[CTINFO2DIR(ctinfo)].bytes +=
 847                                skb->len - skb_network_offset(skb);
 848                }
 849        }
 850
 851        spin_unlock_bh(&nf_conntrack_lock);
 852
 853        /* must be unlocked when calling event cache */
 854        if (event)
 855                nf_conntrack_event_cache(event, ct);
 856}
 857EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
 858
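    /* Kill a conntrack immediately, optionally accounting this final packet.
     * Returns true if the timer was still pending and we fired it here. */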
 859bool __nf_ct_kill_acct(struct nf_conn *ct,
 860                       enum ip_conntrack_info ctinfo,
 861                       const struct sk_buff *skb,
 862                       int do_acct)
 863{
 864        if (do_acct) {
 865                struct nf_conn_counter *acct;
 866
 867                spin_lock_bh(&nf_conntrack_lock);
 868                acct = nf_conn_acct_find(ct);
 869                if (acct) {
 870                        acct[CTINFO2DIR(ctinfo)].packets++;
 871                        acct[CTINFO2DIR(ctinfo)].bytes +=
 872                                skb->len - skb_network_offset(skb);
 873                }
 874                spin_unlock_bh(&nf_conntrack_lock);
 875        }
 876
 877        if (del_timer(&ct->timeout)) {
 878                ct->timeout.function((unsigned long)ct);
 879                return true;
 880        }
 881        return false;
 882}
 883EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
 884
 885#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
 886
 887#include <linux/netfilter/nfnetlink.h>
 888#include <linux/netfilter/nfnetlink_conntrack.h>
 889#include <linux/mutex.h>
 890
 891/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 892 * in nf_conntrack_core, since we don't want the protocols to autoload
 893 * or depend on ctnetlink */
 894int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
 895                               const struct nf_conntrack_tuple *tuple)
 896{
 897        NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
 898        NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
 899        return 0;
 900
 901nla_put_failure:
 902        return -1;
 903}
 904EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
 905
 906const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
 907        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
 908        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
 909};
 910EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
 911
 912int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
 913                               struct nf_conntrack_tuple *t)
 914{
 915        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
 916                return -EINVAL;
 917
 918        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
 919        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
 920
 921        return 0;
 922}
 923EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
 924
 925int nf_ct_port_nlattr_tuple_size(void)
 926{
 927        return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
 928}
 929EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
 930#endif
 931
 932/* Used by ipt_REJECT and ip6t_REJECT. */
 933static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
 934{
 935        struct nf_conn *ct;
 936        enum ip_conntrack_info ctinfo;
 937
 938        /* This ICMP is in reverse direction to the packet which caused it */
 939        ct = nf_ct_get(skb, &ctinfo);
 940        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
 941                ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
 942        else
 943                ctinfo = IP_CT_RELATED;
 944
 945        /* Attach to new skbuff, and increment count */
 946        nskb->nfct = &ct->ct_general;
 947        nskb->nfctinfo = ctinfo;
 948        nf_conntrack_get(nskb->nfct);
 949}
 950
 951/* Bring out ya dead! */
 952static struct nf_conn *
 953get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
 954                void *data, unsigned int *bucket)
 955{
 956        struct nf_conntrack_tuple_hash *h;
 957        struct nf_conn *ct;
 958        struct hlist_nulls_node *n;
 959
 960        spin_lock_bh(&nf_conntrack_lock);
 961        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
 962                hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
 963                        ct = nf_ct_tuplehash_to_ctrack(h);
 964                        if (iter(ct, data))
 965                                goto found;
 966                }
 967        }
 968        hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
 969                ct = nf_ct_tuplehash_to_ctrack(h);
 970                if (iter(ct, data))
 971                        set_bit(IPS_DYING_BIT, &ct->status);
 972        }
 973        spin_unlock_bh(&nf_conntrack_lock);
 974        return NULL;
 975found:
 976        atomic_inc(&ct->ct_general.use);
 977        spin_unlock_bh(&nf_conntrack_lock);
 978        return ct;
 979}
 980
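    /* Walk all conntrack entries and kill every hashed entry for which iter
     * returns true; matching unconfirmed entries are only marked dying. */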
 981void nf_ct_iterate_cleanup(struct net *net,
 982                           int (*iter)(struct nf_conn *i, void *data),
 983                           void *data)
 984{
 985        struct nf_conn *ct;
 986        unsigned int bucket = 0;
 987
 988        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
 989                /* Time to push up daisies... */
 990                if (del_timer(&ct->timeout))
 991                        death_by_timeout((unsigned long)ct);
 992                /* ... else the timer will get him soon. */
 993
 994                nf_ct_put(ct);
 995        }
 996}
 997EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
 998
 999struct __nf_ct_flush_report {
1000        u32 pid;
1001        int report;
1002};
1003
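    /* Iterator callback used by nf_conntrack_flush(): report the destruction
     * to listeners and return 1 so that every entry is killed. */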
1004static int kill_all(struct nf_conn *i, void *data)
1005{
1006        struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
1007
1008        /* get_next_corpse sets the dying bit for us */
1009        nf_conntrack_event_report(IPCT_DESTROY,
1010                                  i,
1011                                  fr->pid,
1012                                  fr->report);
1013        return 1;
1014}
1015
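    /* Free a hash table allocated by nf_ct_alloc_hashtable(), using vfree()
     * or free_pages() depending on how it was allocated. */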
1016void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
1017{
1018        if (vmalloced)
1019                vfree(hash);
1020        else
1021                free_pages((unsigned long)hash,
1022                           get_order(sizeof(struct hlist_head) * size));
1023}
1024EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1025
1026void nf_conntrack_flush(struct net *net, u32 pid, int report)
1027{
1028        struct __nf_ct_flush_report fr = {
1029                .pid    = pid,
1030                .report = report,
1031        };
1032        nf_ct_iterate_cleanup(net, kill_all, &fr);
1033}
1034EXPORT_SYMBOL_GPL(nf_conntrack_flush);
1035
1036static void nf_conntrack_cleanup_init_net(void)
1037{
1038        nf_conntrack_helper_fini();
1039        nf_conntrack_proto_fini();
1040        kmem_cache_destroy(nf_conntrack_cachep);
1041}
1042
1043static void nf_conntrack_cleanup_net(struct net *net)
1044{
1045        nf_ct_event_cache_flush(net);
1046        nf_conntrack_ecache_fini(net);
1047 i_see_dead_people:
1048        nf_conntrack_flush(net, 0, 0);
1049        if (atomic_read(&net->ct.count) != 0) {
1050                schedule();
1051                goto i_see_dead_people;
1052        }
1053        /* wait until all references to nf_conntrack_untracked are dropped */
1054        while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1055                schedule();
1056
1057        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1058                             nf_conntrack_htable_size);
1059        nf_conntrack_acct_fini(net);
1060        nf_conntrack_expect_fini(net);
1061        free_percpu(net->ct.stat);
1062}
1063
1064/* Mishearing the voices in his head, our hero wonders how he's
1065   supposed to kill the mall. */
1066void nf_conntrack_cleanup(struct net *net)
1067{
1068        if (net_eq(net, &init_net))
1069                rcu_assign_pointer(ip_ct_attach, NULL);
1070
1071        /* This makes sure all current packets have passed through
1072           netfilter framework.  Roll on, two-stage module
1073           delete... */
1074        synchronize_net();
1075
1076        nf_conntrack_cleanup_net(net);
1077
1078        if (net_eq(net, &init_net)) {
1079                rcu_assign_pointer(nf_ct_destroy, NULL);
1080                nf_conntrack_cleanup_init_net();
1081        }
1082}
1083
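    /* Allocate a zeroed hash table, rounding the slot count up so the table
     * fills whole pages.  Falls back to vmalloc if the page allocator cannot
     * satisfy the request; when nulls is set, each bucket is terminated with
     * a nulls marker carrying its bucket index. */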
1084void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
1085{
1086        struct hlist_nulls_head *hash;
1087        unsigned int nr_slots, i;
1088        size_t sz;
1089
1090        *vmalloced = 0;
1091
1092        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1093        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1094        sz = nr_slots * sizeof(struct hlist_nulls_head);
1095        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1096                                        get_order(sz));
1097        if (!hash) {
1098                *vmalloced = 1;
1099                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1100                hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
1101        }
1102
1103        if (hash && nulls)
1104                for (i = 0; i < nr_slots; i++)
1105                        INIT_HLIST_NULLS_HEAD(&hash[i], i);
1106
1107        return hash;
1108}
1109EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1110
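    /* "hashsize" module parameter handler: allocate a new table, rehash all
     * entries into it under nf_conntrack_lock with a fresh random seed, then
     * free the old table. */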
1111int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1112{
1113        int i, bucket, vmalloced, old_vmalloced;
1114        unsigned int hashsize, old_size;
1115        int rnd;
1116        struct hlist_nulls_head *hash, *old_hash;
1117        struct nf_conntrack_tuple_hash *h;
1118
1119        /* On boot, we can set this without any fancy locking. */
1120        if (!nf_conntrack_htable_size)
1121                return param_set_uint(val, kp);
1122
1123        hashsize = simple_strtoul(val, NULL, 0);
1124        if (!hashsize)
1125                return -EINVAL;
1126
1127        hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
1128        if (!hash)
1129                return -ENOMEM;
1130
1131        /* We have to rehash for the new table anyway, so we can also
1132         * use a new random seed */
1133        get_random_bytes(&rnd, sizeof(rnd));
1134
1135        /* Lookups in the old hash might happen in parallel, which means we
1136         * might get false negatives during connection lookup. New connections
1137         * created because of a false negative won't make it into the hash
1138         * though since that required taking the lock.
1139         */
1140        spin_lock_bh(&nf_conntrack_lock);
1141        for (i = 0; i < nf_conntrack_htable_size; i++) {
1142                while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1143                        h = hlist_nulls_entry(init_net.ct.hash[i].first,
1144                                        struct nf_conntrack_tuple_hash, hnnode);
1145                        hlist_nulls_del_rcu(&h->hnnode);
1146                        bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1147                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1148                }
1149        }
1150        old_size = nf_conntrack_htable_size;
1151        old_vmalloced = init_net.ct.hash_vmalloc;
1152        old_hash = init_net.ct.hash;
1153
1154        nf_conntrack_htable_size = hashsize;
1155        init_net.ct.hash_vmalloc = vmalloced;
1156        init_net.ct.hash = hash;
1157        nf_conntrack_hash_rnd = rnd;
1158        spin_unlock_bh(&nf_conntrack_lock);
1159
1160        nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
1161        return 0;
1162}
1163EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1164
1165module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1166                  &nf_conntrack_htable_size, 0600);
1167
1168static int nf_conntrack_init_init_net(void)
1169{
1170        int max_factor = 8;
1171        int ret;
1172
1173        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
1174         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
1175        if (!nf_conntrack_htable_size) {
1176                nf_conntrack_htable_size
1177                        = (((num_physpages << PAGE_SHIFT) / 16384)
1178                           / sizeof(struct hlist_head));
1179                if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
1180                        nf_conntrack_htable_size = 16384;
1181                if (nf_conntrack_htable_size < 32)
1182                        nf_conntrack_htable_size = 32;
1183
1184                /* Use a max. factor of four by default to get the same max as
1185                 * with the old struct list_heads. When a table size is given
1186                 * we use the old value of 8 to avoid reducing the max.
1187                 * entries. */
1188                max_factor = 4;
1189        }
1190        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1191
1192        printk("nf_conntrack version %s (%u buckets, %d max)\n",
1193               NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1194               nf_conntrack_max);
1195
1196        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1197                                                sizeof(struct nf_conn),
1198                                                0, SLAB_DESTROY_BY_RCU, NULL);
1199        if (!nf_conntrack_cachep) {
1200                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1201                ret = -ENOMEM;
1202                goto err_cache;
1203        }
1204
1205        ret = nf_conntrack_proto_init();
1206        if (ret < 0)
1207                goto err_proto;
1208
1209        ret = nf_conntrack_helper_init();
1210        if (ret < 0)
1211                goto err_helper;
1212
1213        return 0;
1214
1215err_helper:
1216        nf_conntrack_proto_fini();
1217err_proto:
1218        kmem_cache_destroy(nf_conntrack_cachep);
1219err_cache:
1220        return ret;
1221}
1222
1223static int nf_conntrack_init_net(struct net *net)
1224{
1225        int ret;
1226
1227        atomic_set(&net->ct.count, 0);
1228        INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
1229        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1230        if (!net->ct.stat) {
1231                ret = -ENOMEM;
1232                goto err_stat;
1233        }
1234        ret = nf_conntrack_ecache_init(net);
1235        if (ret < 0)
1236                goto err_ecache;
1237        net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
1238                                             &net->ct.hash_vmalloc, 1);
1239        if (!net->ct.hash) {
1240                ret = -ENOMEM;
1241                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1242                goto err_hash;
1243        }
1244        ret = nf_conntrack_expect_init(net);
1245        if (ret < 0)
1246                goto err_expect;
1247        ret = nf_conntrack_acct_init(net);
1248        if (ret < 0)
1249                goto err_acct;
1250
1251        /* Set up fake conntrack:
1252            - to never be deleted, not in any hashes */
1253#ifdef CONFIG_NET_NS
1254        nf_conntrack_untracked.ct_net = &init_net;
1255#endif
1256        atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1257        /*  - and make it look like a confirmed connection */
1258        set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1259
1260        return 0;
1261
1262err_acct:
1263        nf_conntrack_expect_fini(net);
1264err_expect:
1265        nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1266                             nf_conntrack_htable_size);
1267err_hash:
1268        nf_conntrack_ecache_fini(net);
1269err_ecache:
1270        free_percpu(net->ct.stat);
1271err_stat:
1272        return ret;
1273}
1274
1275int nf_conntrack_init(struct net *net)
1276{
1277        int ret;
1278
1279        if (net_eq(net, &init_net)) {
1280                ret = nf_conntrack_init_init_net();
1281                if (ret < 0)
1282                        goto out_init_net;
1283        }
1284        ret = nf_conntrack_init_net(net);
1285        if (ret < 0)
1286                goto out_net;
1287
1288        if (net_eq(net, &init_net)) {
1289                /* For use by REJECT target */
1290                rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
1291                rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
1292        }
1293        return 0;
1294
1295out_net:
1296        if (net_eq(net, &init_net))
1297                nf_conntrack_cleanup_init_net();
1298out_init_net:
1299        return ret;
1300}
1301