/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/security.h>

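/* One cached flow: hash chain linkage, the lookup key (family, direction
 * and flow key), the generation the entry was resolved in, and the
 * resolved object together with its external reference counter.
 */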
struct flow_cache_entry {
        struct flow_cache_entry *next;
        u16                     family;
        u8                      dir;
        struct flowi            key;
        u32                     genid;
        void                    *object;
        atomic_t                *object_ref;
};

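/* Bumping this counter invalidates every cached object at once: a lookup
 * only trusts an entry whose genid matches the current value, and
 * re-resolves the flow otherwise.
 */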
atomic_t flow_cache_genid = ATOMIC_INIT(0);

static u32 flow_hash_shift;
#define flow_hash_size  (1 << flow_hash_shift)
static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };

#define flow_table(cpu) (per_cpu(flow_tables, cpu))

static struct kmem_cache *flow_cachep __read_mostly;

static int flow_lwm, flow_hwm;

struct flow_percpu_info {
        int hash_rnd_recalc;
        u32 hash_rnd;
        int count;
} ____cacheline_aligned;
static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };

#define flow_hash_rnd_recalc(cpu) \
        (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
#define flow_hash_rnd(cpu) \
        (per_cpu(flow_hash_info, cpu).hash_rnd)
#define flow_count(cpu) \
        (per_cpu(flow_hash_info, cpu).count)

static struct timer_list flow_hash_rnd_timer;

#define FLOW_HASH_RND_PERIOD    (10 * 60 * HZ)

struct flow_flush_info {
        atomic_t cpuleft;
        struct completion completion;
};
static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))

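/* Periodic timer (every FLOW_HASH_RND_PERIOD, i.e. ten minutes): mark
 * every CPU as needing a fresh hash secret on its next lookup, then
 * re-arm for another period.
 */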
static void flow_cache_new_hashrnd(unsigned long arg)
{
        int i;

        for_each_possible_cpu(i)
                flow_hash_rnd_recalc(i) = 1;

        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);
}

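/* Free one entry, dropping the reference it held on its resolved object. */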
static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
{
        if (fle->object)
                atomic_dec(fle->object_ref);
        kmem_cache_free(flow_cachep, fle);
        flow_count(cpu)--;
}

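/* Walk every hash chain on this CPU, keeping at most shrink_to entries
 * per chain and killing the rest.  shrink_to == 0 empties the table.
 */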
static void __flow_cache_shrink(int cpu, int shrink_to)
{
        struct flow_cache_entry *fle, **flp;
        int i;

        for (i = 0; i < flow_hash_size; i++) {
                int k = 0;

                flp = &flow_table(cpu)[i];
                while ((fle = *flp) != NULL && k < shrink_to) {
                        k++;
                        flp = &fle->next;
                }
                while ((fle = *flp) != NULL) {
                        *flp = fle->next;
                        flow_entry_kill(cpu, fle);
                }
        }
}

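/* High-water mark exceeded: trim each chain back down to its per-bucket
 * share of the low-water mark.
 */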
static void flow_cache_shrink(int cpu)
{
        int shrink_to = flow_lwm / flow_hash_size;

        __flow_cache_shrink(cpu, shrink_to);
}

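/* Install a new per-CPU hash secret.  Old bucket positions are now
 * meaningless, so the table has to be emptied as well.
 */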
static void flow_new_hash_rnd(int cpu)
{
        get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
        flow_hash_rnd_recalc(cpu) = 0;

        __flow_cache_shrink(cpu, 0);
}

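/* Hash the flow key with this CPU's secret and mask down to a bucket. */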
static u32 flow_hash_code(struct flowi *key, int cpu)
{
        u32 *k = (u32 *) key;

        return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
                (flow_hash_size - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
        flow_compare_t *k1, *k1_lim, *k2;
        const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

        BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

        k1 = (flow_compare_t *) key1;
        k1_lim = k1 + n_elem;

        k2 = (flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

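/* Main entry point.  Runs with BHs disabled against the local CPU's
 * table: a hit from the current generation returns the cached object
 * with its refcount bumped; a miss or stale genid calls the resolver
 * and caches the result.  If the table is not set up yet, fall through
 * to the resolver uncached.
 */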
void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                        flow_resolve_t resolver)
{
        struct flow_cache_entry *fle, **head;
        unsigned int hash;
        int cpu;

        local_bh_disable();
        cpu = smp_processor_id();

        fle = NULL;
        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR */
        if (!flow_table(cpu))
                goto nocache;

        if (flow_hash_rnd_recalc(cpu))
                flow_new_hash_rnd(cpu);
        hash = flow_hash_code(key, cpu);

        head = &flow_table(cpu)[hash];
        for (fle = *head; fle; fle = fle->next) {
                if (fle->family == family &&
                    fle->dir == dir &&
                    flow_key_compare(key, &fle->key) == 0) {
                        if (fle->genid == atomic_read(&flow_cache_genid)) {
                                void *ret = fle->object;

                                if (ret)
                                        atomic_inc(fle->object_ref);
                                local_bh_enable();

                                return ret;
                        }
                        break;
                }
        }

        if (!fle) {
                if (flow_count(cpu) > flow_hwm)
                        flow_cache_shrink(cpu);

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->next = *head;
                        *head = fle;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, sizeof(*key));
                        fle->object = NULL;
                        flow_count(cpu)++;
                }
        }

nocache:
        {
                int err;
                void *obj;
                atomic_t *obj_ref;

                err = resolver(key, family, dir, &obj, &obj_ref);

                if (fle && !err) {
                        fle->genid = atomic_read(&flow_cache_genid);

                        if (fle->object)
                                atomic_dec(fle->object_ref);

                        fle->object = obj;
                        fle->object_ref = obj_ref;
                        if (obj)
                                atomic_inc(fle->object_ref);
                }
                local_bh_enable();

                if (err)
                        obj = ERR_PTR(err);
                return obj;
        }
}

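/* Per-CPU flush work, run from a tasklet so that it serializes with
 * lookups (both run in BH context on the owning CPU).  Entries from
 * older generations drop their object reference; the last CPU to
 * finish completes the waiter.
 */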
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        int i;
        int cpu;

        cpu = smp_processor_id();
        for (i = 0; i < flow_hash_size; i++) {
                struct flow_cache_entry *fle;

                fle = flow_table(cpu)[i];
                for (; fle; fle = fle->next) {
                        unsigned genid = atomic_read(&flow_cache_genid);

                        if (!fle->object || fle->genid == genid)
                                continue;

                        fle->object = NULL;
                        atomic_dec(fle->object_ref);
                }
        }

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

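/* Cross-call handler: just kick this CPU's flush tasklet, handing it
 * the shared flush info.
 */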
static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        int cpu;
        struct tasklet_struct *tasklet;

        cpu = smp_processor_id();

        tasklet = flow_flush_tasklet(cpu);
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

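/* Flush the flow cache on all online CPUs and wait until every one of
 * them has finished.  Serialized against itself and against CPU hotplug.
 */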
void flow_cache_flush(void)
{
        struct flow_flush_info info;
        static DEFINE_MUTEX(flow_flush_sem);

        /* Don't want cpus going down or up during this. */
        lock_cpu_hotplug();
        mutex_lock(&flow_flush_sem);
        atomic_set(&info.cpuleft, num_online_cpus());
        init_completion(&info.completion);

        local_bh_disable();
        smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
        flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);
        mutex_unlock(&flow_flush_sem);
        unlock_cpu_hotplug();
}

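/* Allocate and initialize one CPU's hash table, hash-secret state and
 * flush tasklet.  The table is sized in whole pages.
 */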
static void __devinit flow_cache_cpu_prepare(int cpu)
{
        struct tasklet_struct *tasklet;
        unsigned long order;

        for (order = 0;
             (PAGE_SIZE << order) <
                     (sizeof(struct flow_cache_entry *)*flow_hash_size);
             order++)
                /* NOTHING */;

        flow_table(cpu) = (struct flow_cache_entry **)
                __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
        if (!flow_table(cpu))
                panic("NET: failed to allocate flow cache order %lu\n", order);

        flow_hash_rnd_recalc(cpu) = 1;
        flow_count(cpu) = 0;

        tasklet = flow_flush_tasklet(cpu);
        tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
}

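/* CPU hotplug callback: empty the table of a CPU that has gone down. */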
static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
{
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                __flow_cache_shrink((unsigned long)hcpu, 0);
        return NOTIFY_OK;
}

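/* Boot-time setup: slab cache, table geometry and watermarks (1024
 * buckets, low water 2 and high water 4 entries per bucket on average),
 * the hash-secret rotation timer, per-CPU tables, and the hotplug
 * notifier.
 */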
static int __init flow_cache_init(void)
{
        int i;

        flow_cachep = kmem_cache_create("flow_cache",
                                        sizeof(struct flow_cache_entry),
                                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                        NULL);
        flow_hash_shift = 10;
        flow_lwm = 2 * flow_hash_size;
        flow_hwm = 4 * flow_hash_size;

        init_timer(&flow_hash_rnd_timer);
        flow_hash_rnd_timer.function = flow_cache_new_hashrnd;
        flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&flow_hash_rnd_timer);

        for_each_possible_cpu(i)
                flow_cache_cpu_prepare(i);

        hotcpu_notifier(flow_cache_cpu, 0);
        return 0;
}

module_init(flow_cache_init);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);