/* Event cache for netfilter. */
12#include <linux/types.h>
13#include <linux/netfilter.h>
14#include <linux/skbuff.h>
15#include <linux/vmalloc.h>
16#include <linux/stddef.h>
17#include <linux/err.h>
18#include <linux/percpu.h>
19#include <linux/notifier.h>
20#include <linux/kernel.h>
21#include <linux/netdevice.h>
22
23#include <net/netfilter/nf_conntrack.h>
24#include <net/netfilter/nf_conntrack_core.h>
25
/* Notifier chain for conntrack events; consumers attach via
 * nf_conntrack_register_notifier() below. */
ATOMIC_NOTIFIER_HEAD(nf_conntrack_chain);
EXPORT_SYMBOL_GPL(nf_conntrack_chain);

/* Notifier chain for expectation events; consumers attach via
 * nf_ct_expect_register_notifier() below. */
ATOMIC_NOTIFIER_HEAD(nf_ct_expect_chain);
EXPORT_SYMBOL_GPL(nf_ct_expect_chain);
31
/* deliver cached events and clear cache entry - must be called with
 * locally disabled softirqs */
/*
 * Deliver the events cached in @ecache for its conntrack, then release
 * the cache's reference on that conntrack and empty the slot.
 *
 * Events are pushed down nf_conntrack_chain only when the conntrack is
 * confirmed, not dying, and at least one event bit is pending; the event
 * mask is cleared and the reference dropped in every case, so the slot
 * always comes out empty (ecache->ct == NULL).
 */
static inline void
__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
{
	if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
	    && ecache->events) {
		/* pid/report are zero: cached delivery has no requesting
		 * process to report back to. */
		struct nf_ct_event item = {
			.ct = ecache->ct,
			.pid = 0,
			.report = 0
		};

		atomic_notifier_call_chain(&nf_conntrack_chain,
					   ecache->events,
					   &item);
	}

	/* Drop the reference taken when the conntrack was cached; the
	 * notifier call above must complete before the put. */
	ecache->events = 0;
	nf_ct_put(ecache->ct);
	ecache->ct = NULL;
}
54
/* Deliver all cached events for a particular conntrack. This is called
 * by code prior to async packet handling for freeing the skb */
57void nf_ct_deliver_cached_events(const struct nf_conn *ct)
58{
59 struct net *net = nf_ct_net(ct);
60 struct nf_conntrack_ecache *ecache;
61
62 local_bh_disable();
63 ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
64 if (ecache->ct == ct)
65 __nf_ct_deliver_cached_events(ecache);
66 local_bh_enable();
67}
68EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
/* Deliver cached events for old pending events, if current conntrack != old */
71void __nf_ct_event_cache_init(struct nf_conn *ct)
72{
73 struct net *net = nf_ct_net(ct);
74 struct nf_conntrack_ecache *ecache;
75
76
77 ecache = per_cpu_ptr(net->ct.ecache, raw_smp_processor_id());
78 BUG_ON(ecache->ct == ct);
79 if (ecache->ct)
80 __nf_ct_deliver_cached_events(ecache);
81
82 ecache->ct = ct;
83 nf_conntrack_get(&ct->ct_general);
84}
85EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
/* flush the event cache - touches other CPU's data and must not be
 * called while packets are still passing through the code */
89void nf_ct_event_cache_flush(struct net *net)
90{
91 struct nf_conntrack_ecache *ecache;
92 int cpu;
93
94 for_each_possible_cpu(cpu) {
95 ecache = per_cpu_ptr(net->ct.ecache, cpu);
96 if (ecache->ct)
97 nf_ct_put(ecache->ct);
98 }
99}
100
101int nf_conntrack_ecache_init(struct net *net)
102{
103 net->ct.ecache = alloc_percpu(struct nf_conntrack_ecache);
104 if (!net->ct.ecache)
105 return -ENOMEM;
106 return 0;
107}
108
/* Free the per-cpu event cache allocated by nf_conntrack_ecache_init().
 * Caller is expected to have flushed cached references first (see
 * nf_ct_event_cache_flush()). */
void nf_conntrack_ecache_fini(struct net *net)
{
	free_percpu(net->ct.ecache);
}
113
/* Attach @nb to the conntrack event notifier chain.
 * Returns the atomic_notifier_chain_register() result (0 on success). */
int nf_conntrack_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&nf_conntrack_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
119
/* Detach @nb from the conntrack event notifier chain.
 * Returns the atomic_notifier_chain_unregister() result (0 on success). */
int nf_conntrack_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
125
/* Attach @nb to the expectation event notifier chain.
 * Returns the atomic_notifier_chain_register() result (0 on success). */
int nf_ct_expect_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&nf_ct_expect_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
131
/* Detach @nb from the expectation event notifier chain.
 * Returns the atomic_notifier_chain_unregister() result (0 on success). */
int nf_ct_expect_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nf_ct_expect_chain, nb);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
137