linux/lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
/*
 * All percpu_counters are linked on this list so the CPU-hotplug callback
 * below can fold a dead CPU's deltas back into each counter.
 */
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

/*
 * Set the counter to @amount: zero every CPU's local delta and store the
 * new value in the central count, all under fbc->lock.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
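
/*
 * Usage sketch: a filesystem might reseed a counter from on-disk state at
 * mount time ("sbi" and "es" here are illustrative, not from this file):
 *
 *      percpu_counter_set(&sbi->free_blocks,
 *                         le32_to_cpu(es->s_free_blocks_count));
 */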

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;
        s32 *pcount;
        int cpu = get_cpu();

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= batch || count <= -batch) {
                /*
                 * The local delta has reached the batch size: fold it
                 * into the central count under the lock and reset it.
                 */
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                /* Fast path: stay CPU-local, no lock taken. */
                *pcount = count;
        }
        put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
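
/*
 * Callers normally go through the percpu_counter_add() wrapper in
 * <linux/percpu_counter.h>, which supplies the default FBC_BATCH batch
 * size.  A usage sketch (the counter name is illustrative):
 *
 *      percpu_counter_add(&nr_events, 1);
 *      percpu_counter_add(&nr_events, -1);
 *
 * Each call stays CPU-local until the local delta reaches +/-batch, at
 * which point the lock is taken once to fold the delta into fbc->count.
 */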

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
                /* Fold this CPU's delta into the central count. */
                *pcount = 0;
        }
        fbc->count = ret;

        spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
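
/*
 * A sketch of a typical consumer, via the percpu_counter_sum() wrapper in
 * <linux/percpu_counter.h> (the free-block counter is illustrative):
 *
 *      buf->f_bfree = percpu_counter_sum(&sbi->free_blocks);
 *
 * Note that zeroing *pcount here races with __percpu_counter_add() on
 * other CPUs, which updates its *pcount without taking fbc->lock, so a
 * concurrent add can in principle be double-counted or lost; callers
 * treat the result as a close snapshot rather than an exact value.
 */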

static struct lock_class_key percpu_counter_irqsafe;

int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        spin_lock_init(&fbc->lock);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(percpu_counter_init);
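
/*
 * A minimal lifecycle sketch (the counter name is illustrative, not from
 * this file):
 *
 *      struct percpu_counter nr_inodes;
 *
 *      if (percpu_counter_init(&nr_inodes, 0))
 *              return -ENOMEM;
 *      percpu_counter_add(&nr_inodes, 1);
 *      printk("%lld\n", (long long)percpu_counter_read(&nr_inodes));
 *      percpu_counter_destroy(&nr_inodes);
 */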

/*
 * Like percpu_counter_init(), but give the lock its own lockdep class for
 * counters whose lock is also taken from interrupt context.
 */
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
{
        int err;

        err = percpu_counter_init(fbc, amount);
        if (!err)
                lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
        return err;
}
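
/*
 * Intended for counters that are also updated from interrupt context;
 * the separate lockdep class keeps lockdep from flagging the mixed use.
 * Non-irq updaters of such a counter would disable interrupts themselves,
 * e.g. (sketch):
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      percpu_counter_add(&fbc, 1);
 *      local_irq_restore(flags);
 */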

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

        free_percpu(fbc->counters);
        fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
        unsigned int cpu;
        struct percpu_counter *fbc;

        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                /*
                 * Fold the dead CPU's delta into the central count;
                 * the CPU is offline, so nothing else touches its slot.
                 */
                spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock_irqrestore(&fbc->lock, flags);
        }
        mutex_unlock(&percpu_counters_lock);
        return NOTIFY_OK;
}

static int __init percpu_counter_startup(void)
{
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);
#endif