/* linux/include/linux/percpu_counter.h */
   1#ifndef _LINUX_PERCPU_COUNTER_H
   2#define _LINUX_PERCPU_COUNTER_H
   3/*
   4 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
   5 *
   6 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
   7 */
   8
   9#include <linux/spinlock.h>
  10#include <linux/smp.h>
  11#include <linux/list.h>
  12#include <linux/threads.h>
  13#include <linux/percpu.h>
  14#include <linux/types.h>
  15
  16#ifdef CONFIG_SMP
  17
struct percpu_counter {
        raw_spinlock_t lock;    /* serializes updates of count (presumably
                                 * taken by the out-of-line helpers —
                                 * confirm against lib/percpu_counter.c) */
        s64 count;              /* central value; per-cpu deltas not included */
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters; /* per-cpu deltas; NULL until init succeeds */
};
  26
/* Default batch passed to __percpu_counter_add() by percpu_counter_add(). */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key);

/*
 * Initialize @fbc to @value.  Each invocation site gets its own static
 * lock_class_key, so lockdep can distinguish the locks of counters
 * initialized from different places.
 */
#define percpu_counter_init(fbc, value)                                 \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, &__key);              \
        })

/* Out-of-line operations, implemented in lib/ (not visible here). */
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
  44
/*
 * Add @amount to @fbc using the global default batch size
 * (percpu_counter_batch); batch semantics live in __percpu_counter_add().
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        __percpu_counter_add(fbc, amount, percpu_counter_batch);
}
  49
  50static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
  51{
  52        s64 ret = __percpu_counter_sum(fbc);
  53        return ret < 0 ? 0 : ret;
  54}
  55
/* Accurate (but slower) value: defers to the out-of-line summing helper. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}
  60
/*
 * Cheap approximate read: returns the central count only, without the
 * per-cpu deltas.  Use percpu_counter_sum() when accuracy matters.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
  65
/*
 * Like percpu_counter_read(), but clamps the result to >= 0.
 *
 * It is possible for percpu_counter_read() to return a small negative
 * number for some counter which should never be negative: the per-cpu
 * deltas are not folded in, so the central count can transiently dip
 * below zero.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        s64 ret = fbc->count;

        barrier();              /* Prevent reloads of fbc->count */
        if (ret >= 0)
                return ret;
        return 0;
}
  80
  81static inline int percpu_counter_initialized(struct percpu_counter *fbc)
  82{
  83        return (fbc->counters != NULL);
  84}
  85
  86#else /* !CONFIG_SMP */
  87
/* UP flavour: no per-cpu state needed, just the plain value. */
struct percpu_counter {
        s64 count;
};
  91
/* UP init: store the initial value; nothing to allocate, cannot fail. */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
        return 0;
}
  97
/* UP destroy: nothing was allocated, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
 101
/* UP set: a plain store replaces the out-of-line SMP version. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}
 106
 107static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 108{
 109        if (fbc->count > rhs)
 110                return 1;
 111        else if (fbc->count < rhs)
 112                return -1;
 113        else
 114                return 0;
 115}
 116
/*
 * UP add: the read-modify-write of the s64 is bracketed by
 * preempt_disable()/preempt_enable() so it is not torn by preemption.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}
 124
/* UP variant: @batch is meaningless without per-cpu deltas; ignore it. */
static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}
 130
/* UP read: the stored value is exact, not approximate. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}
 135
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 *
 * NOTE(review): unlike the SMP version, no clamping to 0 is done here —
 * a negative count is returned as-is.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}
 144
/* UP: a read is already exact, so "sum" degenerates to a read. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}
 149
/* UP: a read is already exact, so "sum" degenerates to a read. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}
 154
/* UP: there is no allocated state, so the counter is always "initialized". */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
        return 1;
}
 159
 160#endif  /* CONFIG_SMP */
 161
/* Increment by one (common code for both SMP and UP flavours). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}
 166
/* Decrement by one (common code for both SMP and UP flavours). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}
 171
/*
 * Subtract @amount by adding its negation.
 * NOTE(review): negating the most negative s64 is undefined behaviour;
 * assumes callers never pass S64_MIN.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}
 176
 177#endif /* _LINUX_PERCPU_COUNTER_H */
 178
/* lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995. */