/* linux/lib/percpu_counter.c */
   1/*
   2 * Fast batching percpu counters.
   3 */
   4
   5#include <linux/percpu_counter.h>
   6#include <linux/notifier.h>
   7#include <linux/mutex.h>
   8#include <linux/init.h>
   9#include <linux/cpu.h>
  10#include <linux/module.h>
  11
  12#ifdef CONFIG_HOTPLUG_CPU
  13static LIST_HEAD(percpu_counters);
  14static DEFINE_MUTEX(percpu_counters_lock);
  15#endif
  16
  17void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
  18{
  19        int cpu;
  20
  21        spin_lock(&fbc->lock);
  22        for_each_possible_cpu(cpu) {
  23                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
  24                *pcount = 0;
  25        }
  26        fbc->count = amount;
  27        spin_unlock(&fbc->lock);
  28}
  29EXPORT_SYMBOL(percpu_counter_set);
  30
  31void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
  32{
  33        s64 count;
  34        s32 *pcount;
  35        int cpu = get_cpu();
  36
  37        pcount = per_cpu_ptr(fbc->counters, cpu);
  38        count = *pcount + amount;
  39        if (count >= batch || count <= -batch) {
  40                spin_lock(&fbc->lock);
  41                fbc->count += count;
  42                *pcount = 0;
  43                spin_unlock(&fbc->lock);
  44        } else {
  45                *pcount = count;
  46        }
  47        put_cpu();
  48}
  49EXPORT_SYMBOL(__percpu_counter_add);
  50
  51/*
  52 * Add up all the per-cpu counts, return the result.  This is a more accurate
  53 * but much slower version of percpu_counter_read_positive()
  54 */
  55s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
  56{
  57        s64 ret;
  58        int cpu;
  59
  60        spin_lock(&fbc->lock);
  61        ret = fbc->count;
  62        for_each_online_cpu(cpu) {
  63                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
  64                ret += *pcount;
  65                if (set)
  66                        *pcount = 0;
  67        }
  68        if (set)
  69                fbc->count = ret;
  70
  71        spin_unlock(&fbc->lock);
  72        return ret;
  73}
  74EXPORT_SYMBOL(__percpu_counter_sum);
  75
/* lockdep class for counters whose lock may be taken from irq context */
static struct lock_class_key percpu_counter_irqsafe;
  77
  78int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
  79{
  80        spin_lock_init(&fbc->lock);
  81        fbc->count = amount;
  82        fbc->counters = alloc_percpu(s32);
  83        if (!fbc->counters)
  84                return -ENOMEM;
  85#ifdef CONFIG_HOTPLUG_CPU
  86        mutex_lock(&percpu_counters_lock);
  87        list_add(&fbc->list, &percpu_counters);
  88        mutex_unlock(&percpu_counters_lock);
  89#endif
  90        return 0;
  91}
  92EXPORT_SYMBOL(percpu_counter_init);
  93
  94int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
  95{
  96        int err;
  97
  98        err = percpu_counter_init(fbc, amount);
  99        if (!err)
 100                lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
 101        return err;
 102}
 103
 104void percpu_counter_destroy(struct percpu_counter *fbc)
 105{
 106        if (!fbc->counters)
 107                return;
 108
 109        free_percpu(fbc->counters);
 110        fbc->counters = NULL;
 111#ifdef CONFIG_HOTPLUG_CPU
 112        mutex_lock(&percpu_counters_lock);
 113        list_del(&fbc->list);
 114        mutex_unlock(&percpu_counters_lock);
 115#endif
 116}
 117EXPORT_SYMBOL(percpu_counter_destroy);
 118
 119#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug notifier: when a cpu goes away, fold its per-cpu delta
 * from every registered counter back into the central count, so the
 * value is not lost (__percpu_counter_sum only walks online cpus).
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	/* Only the post-offline event matters here. */
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	/* percpu_counters_lock pins the list against init/destroy. */
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		/* irqsave: some counters' locks are taken from irq context */
		spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		/* Fold the dead cpu's delta into the central count. */
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}
 144
/* Register the hotplug callback once during boot. */
static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
 151#endif
 152
/* (page footer from lxr.linux.no, kindly hosted by Redpill Linpro AS) */