linux/lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

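/*
 * For reference (a sketch, not part of this file): the counter object from
 * <linux/percpu_counter.h> is assumed to look roughly like
 *
 *	struct percpu_counter {
 *		spinlock_t lock;		// protects ->count
 *		s64 count;			// global, batched value
 *	#ifdef CONFIG_HOTPLUG_CPU
 *		struct list_head list;		// all counters, for CPU hotplug
 *	#endif
 *		s32 *counters;			// per-cpu deltas
 *	};
 */
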
/*
 * Set the counter to @amount, zeroing every cpu's local delta.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		/*
		 * Local delta reached the batch size: fold it into the
		 * global count under the lock and reset this cpu's slot.
		 */
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);

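/*
 * Illustrative note, not part of the original file: callers typically go
 * through the percpu_counter_add() wrapper in <linux/percpu_counter.h>,
 * which is assumed here to pass a default batch (FBC_BATCH) down to
 * __percpu_counter_add().  Deltas smaller than the batch stay in the
 * caller's per-cpu slot, so the common case never touches fbc->lock.
 * A minimal sketch, with a hypothetical counter "nr_things":
 *
 *	percpu_counter_add(&nr_things, 1);		// fast path: per-cpu only
 *	__percpu_counter_add(&nr_things, 1, 64);	// explicit batch of 64
 */
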
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

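/*
 * Illustrative note, not part of the original file: percpu_counter_read()
 * and percpu_counter_read_positive() in <linux/percpu_counter.h> are
 * assumed to return just fbc->count, so the value they report can lag by
 * up to roughly (batch - 1) per online CPU.  When a more accurate value is
 * needed, the sum above folds every per-cpu slot in under fbc->lock.
 * A minimal sketch, reusing the hypothetical "nr_things" counter:
 *
 *	s64 approx = percpu_counter_read(&nr_things);	// cheap, may lag
 *	s64 exact  = __percpu_counter_sum(&nr_things);	// slower, accurate
 */
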
static struct lock_class_key percpu_counter_irqsafe;

int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(percpu_counter_init);

/*
 * Like percpu_counter_init(), but puts the lock in its own lockdep class
 * (percpu_counter_irqsafe) for counters used from irq-safe contexts.
 */
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
{
	int err;

	err = percpu_counter_init(fbc, amount);
	if (!err)
		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
	return err;
}

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

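/*
 * Illustrative lifecycle sketch, not part of the original file.  The
 * function and counter names are hypothetical, error handling is minimal,
 * and percpu_counter_add() refers to the header wrapper around
 * __percpu_counter_add().
 */
#if 0
static s64 example_count_things(void)
{
	struct percpu_counter nr_things;
	s64 total;

	if (percpu_counter_init(&nr_things, 0))
		return -ENOMEM;

	percpu_counter_add(&nr_things, 1);		/* usually stays per-cpu */
	percpu_counter_add(&nr_things, 2);

	total = __percpu_counter_sum(&nr_things);	/* fold in all cpus */

	percpu_counter_destroy(&nr_things);
	return total;
}
#endif
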
#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		/* Fold the dead CPU's local delta into the global count. */
		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}

static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
#endif