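/*
 * Connection state tracking for netfilter (nf_conntrack core).
 * Shared infrastructure used by the NAT layer and by the iptables/ip6tables
 * conntrack-related extensions.
 */
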
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);

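/* Hash a conntrack tuple: jhash2() covers the source part and the destination
 * address, while the destination port and protocol number are folded into the
 * seed together with the zone and nf_conntrack_hash_rnd, so the direction of
 * the tuple does not affect its bucket. */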
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
{
	unsigned int n;

	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		       tuple->dst.protonum));
}

static u32 __hash_bucket(u32 hash, unsigned int size)
{
	return ((u64)hash * size) >> 32;
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
	return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  u16 zone, unsigned int size)
{
	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
}

static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
				       const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, zone, net->ct.htable_size);
}

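/* Fill in a conntrack tuple from a packet: the l3proto and l4proto helpers
 * extract the network- and transport-layer parts starting at nhoff/dataoff. */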
bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	nf_ct_remove_expectations(ct);
}

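/* Final destruction of a conntrack entry, invoked once the last reference is
 * gone: let the l4proto clean up, drop any remaining expectations, unlink
 * unconfirmed entries and return the object to the slab. */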
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);

	nf_ct_remove_expectations(ct);

	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);

	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);

static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}

	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}

void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);

	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_to_ns(ktime_get_real());

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}

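/*
 * Lockless lookup of a tuple in the hash table.
 * Warning: the caller must take a reference on the returned entry and
 * recheck nf_ct_tuple_equal(tuple, &h->tuple) afterwards, since entries
 * may be recycled under SLAB_DESTROY_BY_RCU.
 */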
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int bucket = hash_bucket(hash, net);

	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}

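	/*
	 * If the nulls value at the end of the chain does not match this
	 * bucket, the entry we followed was moved to another chain while we
	 * walked it, so the lookup must be restarted.
	 */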
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC(net, search_restart);
		goto begin;
	}
	local_bh_enable();

	return NULL;
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	return ____nf_conntrack_find(net, zone, tuple,
				     hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, u16 zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
				     nf_ct_zone(ct) != zone)) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, zone));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

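/* Insert a conntrack into the hash table unless an entry with the same
 * original or reply tuple already exists in the same zone; returns -EEXIST
 * on such a clash. */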
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	u16 zone;

	zone = nf_ct_zone(ct);
	hash = hash_conntrack(net, zone,
			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(net, zone,
				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	spin_lock_bh(&nf_conntrack_lock);

	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	add_timer(&ct->timeout);
	nf_conntrack_get(&ct->ct_general);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	return 0;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

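/* Confirm a connection given skb; places it in the hash table. Called once
 * the first packet of a connection has made it through the hooks: the entry
 * leaves the unconfirmed list, its timer starts relative to confirmation
 * time, and NEW/RELATED events are cached. */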
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	u16 zone;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);

	hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
	hash = hash_bucket(hash, net);
	repl_hash = hash_conntrack(net, zone,
				   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	if (unlikely(nf_ct_is_dying(ct))) {
		spin_unlock_bh(&nf_conntrack_lock);
		return NF_ACCEPT;
	}

	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple) &&
		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
			goto out;

	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		if (skb->tstamp.tv64 == 0)
			__net_timestamp((struct sk_buff *)skb);

		tstamp->start = ktime_to_ns(skb->tstamp);
	}

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

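/* Returns true if a connection corresponds to the tuple (other than itself)
 * in the same zone. */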
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;
	u16 zone = nf_ct_zone(ignored_conntrack);
	unsigned int hash = hash_conntrack(net, zone, tuple);

	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (ct != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple) &&
		    nf_ct_zone(ct) == zone) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

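/* There is a small race here where we may free a just-assured connection.
 * Too bad: we're in trouble anyway. Scan up to NF_CT_EVICTION_RANGE entries
 * starting at the given bucket and evict an unassured one to make room. */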
static noinline int early_drop(struct net *net, unsigned int hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < net->ct.htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct != NULL) {
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}

		if (cnt >= NF_CT_EVICTION_RANGE)
			break;

		hash = (hash + 1) % net->ct.htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		if (test_bit(IPS_DYING_BIT, &ct->status)) {
			dropped = 1;
			NF_CT_STAT_INC_ATOMIC(net, early_drop);
		}
	}
	nf_ct_put(ct);
	return dropped;
}

void init_nf_conntrack_hash_rnd(void)
{
	unsigned int rand;

	do {
		get_random_bytes(&rand, sizeof(rand));
	} while (!rand);
	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
}

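/* Allocate a new conntrack for the given tuple pair. If the table is above
 * nf_conntrack_max, early_drop() is attempted first; on failure this returns
 * ERR_PTR(-ENOMEM). The reply hnnode.pprev field temporarily stores the
 * precomputed hash so that confirmation can reuse it. */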
static struct nf_conn *
__nf_conntrack_alloc(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
		hash = hash_conntrack_raw(orig, zone);
	}

	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash_bucket(hash, net))) {
			atomic_dec(&net->ct.count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}

	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;

	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;

	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
	write_pnet(&ct->ct_net, net);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (zone) {
		struct nf_conntrack_zone *nf_ct_zone;

		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
		if (!nf_ct_zone)
			goto out_free;
		nf_ct_zone->id = zone;
	}
#endif

	smp_wmb();
	atomic_set(&ct->ct_general.use, 1);
	return ct;

#ifdef CONFIG_NF_CONNTRACK_ZONES
out_free:
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
	return ERR_PTR(-ENOMEM);
#endif
}

struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

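/* Allocate and initialise a conntrack for a packet that matched no existing
 * entry: set up the l4proto state, attach accounting/timestamp/event-cache
 * extensions, honour a matching expectation (helper, master, mark/secmark)
 * and put the new entry on the unconfirmed list. */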
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct))
		return (struct nf_conntrack_tuple_hash *)ct;

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
			     ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, zone, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

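/* On success, returns the conntrack for this packet, sets skb->nfct and
 * *ctinfo, and tells the caller whether the reply-seen bit should be set. */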
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	hash = hash_conntrack_raw(&tuple, zone);
	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
				   skb, dataoff, hash);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED_REPLY;
		*set_reply = 1;
	} else {
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

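/* Main conntrack hook entry point: map the packet to an existing or freshly
 * created connection, run the l4proto error check and packet handlers, and
 * cache a REPLY event when the first reply is seen. Runs with rcu_read_lock()
 * held by nf_hook_slow(). */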
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct, *tmpl = NULL;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	if (skb->nfct) {
		tmpl = (struct nf_conn *)skb->nfct;
		if (!nf_ct_is_template(tmpl)) {
			NF_CT_STAT_INC_ATOMIC(net, ignore);
			return NF_ACCEPT;
		}
		skb->nfct = NULL;
	}

	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = -ret;
		goto out;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	if (l4proto->error != NULL) {
		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
				     pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			ret = -ret;
			goto out;
		}

		if (skb->nfct)
			goto out;
	}

	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		ret = NF_ACCEPT;
		goto out;
	}

	if (IS_ERR(ct)) {
		NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = NF_DROP;
		goto out;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret <= 0) {
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		ret = -ret;
		goto out;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
	if (tmpl) {
		if (ret == NF_REPEAT)
			skb->nfct = (struct nf_conntrack *)tmpl;
		else
			nf_ct_put(tmpl);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

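/* Refresh the conntrack timeout to expire in extra_jiffies and, if do_acct
 * is set, update the per-direction packet/byte counters. Fixed-timeout
 * connections only get their accounting updated. */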
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
			atomic64_add(skb->len, &acct[CTINFO2DIR(ctinfo)].bytes);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			atomic64_inc(&acct[CTINFO2DIR(ctinfo)].packets);
			atomic64_add(skb->len - skb_network_offset(skb),
				     &acct[CTINFO2DIR(ctinfo)].bytes);
		}
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_zone),
	.align	= __alignof__(struct nf_conntrack_zone),
	.id	= NF_CT_EXT_ZONE,
};
#endif

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

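/* Used by ipt_REJECT and ip6t_REJECT: attach the conntrack of the original
 * packet to the generated reply skb, with the ctinfo direction reversed,
 * since the ICMP error/RST travels in the opposite direction. */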
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

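/* Walk the hash table under nf_conntrack_lock and return, with a reference
 * taken, the first entry for which iter() is true; matching entries on the
 * unconfirmed list are only marked IPS_DYING. */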
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
	struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(i);
	if (tstamp && tstamp->stop == 0)
		tstamp->stop = ktime_to_ns(ktime_get_real());

	if (nf_conntrack_event_report(IPCT_DESTROY, i,
				      fr->pid, fr->report) < 0)
		return 1;

	set_bit(IPS_DYING_BIT, &i->status);
	return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(void *hash, unsigned int size)
{
	if (is_vmalloc_addr(hash))
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid	= pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}

static int untrack_refs(void)
{
	int cnt = 0, cpu;

	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);

		cnt += atomic_read(&ct->ct_general.use) - 1;
	}
	return cnt;
}

static void nf_conntrack_cleanup_init_net(void)
{
	while (untrack_refs() > 0)
		schedule();

	nf_conntrack_helper_fini();
	nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
	nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
}

static void nf_conntrack_cleanup_net(struct net *net)
{
 i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	nf_ct_release_dying_list(net);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}

	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
	nf_conntrack_ecache_fini(net);
	nf_conntrack_tstamp_fini(net);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
	kfree(net->ct.slabname);
	free_percpu(net->ct.stat);
}

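/* Tear down conntrack state for a network namespace. synchronize_net() makes
 * sure in-flight packets have left the netfilter framework before the per-net
 * data is freed; for init_net the ip_ct_attach and nf_ct_destroy hooks are
 * unregistered as well. */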
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		RCU_INIT_POINTER(ip_ct_attach, NULL);

	synchronize_net();

	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		RCU_INIT_POINTER(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vzalloc(sz);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket;
	unsigned int hashsize, old_size;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (current->nsproxy->net_ns != &init_net)
		return -EOPNOTSUPP;

	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, 1);
	if (!hash)
		return -ENOMEM;

	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < init_net.ct.htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					struct nf_conntrack_tuple_hash, hnnode);
			ct = nf_ct_tuplehash_to_ctrack(h);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
						  hashsize);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = init_net.ct.htable_size;
	old_hash = init_net.ct.hash;

	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
	init_net.ct.hash = hash;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

void nf_ct_untracked_status_or(unsigned long bits)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
}
EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);

static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret, cpu;

	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

#ifdef CONFIG_NF_CONNTRACK_ZONES
	ret = nf_ct_extend_register(&nf_ct_zone_extend);
	if (ret < 0)
		goto err_extend;
#endif

	for_each_possible_cpu(cpu) {
		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
		write_pnet(&ct->ct_net, &init_net);
		atomic_set(&ct->ct_general.use, 1);
	}

	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
	return 0;

#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
	nf_conntrack_helper_fini();
#endif
err_helper:
	nf_conntrack_proto_fini();
err_proto:
	return ret;
}

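/* Dedicated "null" values for the unconfirmed and dying list heads, distinct
 * from the values used for hash table buckets. */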
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)

static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}

	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
	if (!net->ct.slabname) {
		ret = -ENOMEM;
		goto err_slabname;
	}

	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
							sizeof(struct nf_conn), 0,
							SLAB_DESTROY_BY_RCU, NULL);
	if (!net->ct.nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	net->ct.htable_size = nf_conntrack_htable_size;
	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_tstamp_init(net);
	if (ret < 0)
		goto err_tstamp;
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;

	return 0;

err_ecache:
	nf_conntrack_tstamp_fini(net);
err_tstamp:
	nf_conntrack_acct_fini(net);
err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
	kfree(net->ct.slabname);
err_slabname:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}

s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			enum ip_conntrack_dir dir,
			u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
		RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
		RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
	}
	return 0;

out_net:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}