linux/include/linux/percpu.h
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE           (8 << 10)
#else
#define PERCPU_MODULE_RESERVE           0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM                                              \
        (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +      \
         PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({                           \
        preempt_disable();                              \
        &__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {                           \
        (void)&(var);                                   \
        preempt_enable();                               \
} while (0)

#define get_cpu_ptr(var) ({                             \
        preempt_disable();                              \
        this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {                           \
        (void)(var);                                    \
        preempt_enable();                               \
} while (0)
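
/*
 * Usage sketch (illustrative only; my_counter is a hypothetical name):
 * get_cpu_var()/put_cpu_var() bracket a preemption-safe access to a
 * statically defined per-cpu variable.
 *
 *	DEFINE_PER_CPU(int, my_counter);
 *
 *	static void bump_my_counter(void)
 *	{
 *		get_cpu_var(my_counter)++;	(preemption disabled here)
 *		put_cpu_var(my_counter);	(preemption enabled again)
 *	}
 *
 * get_cpu_ptr()/put_cpu_ptr() provide the same bracket for pointers
 * obtained from alloc_percpu().
 */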

/* minimum unit size, which is also the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE              PFN_ALIGN(32 << 10)

/*
 * The percpu allocator can serve percpu allocations before slab is
 * initialized, which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much memory to preallocate
 * for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
 * PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS      128
#define PERCPU_DYNAMIC_EARLY_SIZE       (12 << 10)
/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if the arch is
 * manually allocating and mapping it for faster access (as a part of
 * a large page mapping, for example).
 *
 * The following values give between one and two pages of free space
 * after a typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on 64-bit and 32-bit x86.  A
 * more intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE          (20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE          (12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
        int                     nr_units;       /* aligned # of units */
        unsigned long           base_offset;    /* base address offset */
        unsigned int            *cpu_map;       /* unit->cpu map, empty
                                                 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
        size_t                  static_size;
        size_t                  reserved_size;
        size_t                  dyn_size;
        size_t                  unit_size;
        size_t                  atom_size;
        size_t                  alloc_size;
        size_t                  __ai_size;      /* internal, don't use */
        int                     nr_groups;      /* 0 if grouping unnecessary */
        struct pcpu_group_info  groups[];
};

enum pcpu_fc {
        PCPU_FC_AUTO,
        PCPU_FC_EMBED,
        PCPU_FC_PAGE,

        PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
                                     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                                                             int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                                         void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                                size_t atom_size,
                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
                                pcpu_fc_alloc_fn_t alloc_fn,
                                pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
                                pcpu_fc_alloc_fn_t alloc_fn,
                                pcpu_fc_free_fn_t free_fn,
                                pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
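
/*
 * Illustrative sketch (not part of this file): an architecture's
 * setup_per_cpu_areas() typically supplies boot-time allocator
 * callbacks to pcpu_embed_first_chunk().  The callback bodies below
 * assume a generic bootmem-backed setup; they are a sketch, not a
 * definitive implementation.
 *
 *	static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return __alloc_bootmem_nopanic(size, align,
 *					       __pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init pcpu_fc_free(void *ptr, size_t size)
 *	{
 *		free_bootmem(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    NULL, pcpu_fc_alloc, pcpu_fc_free);
 */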

/*
 * Use this to get to a cpu's version of a dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)   SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)   ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)      \
        (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
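
/*
 * Usage sketch (illustrative only; identifiers are hypothetical):
 * allocate a dynamic per-cpu counter, bump the local CPU's copy, sum
 * all copies with per_cpu_ptr(), then free it.
 *
 *	int __percpu *hits = alloc_percpu(int);
 *	int cpu, total = 0;
 *
 *	if (!hits)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*hits);			(local CPU's copy)
 *
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(hits, cpu);
 *
 *	free_percpu(hits);
 */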

/*
 * Size-dispatch macros: split an operation into a set of functions
 * that are called for the different scalar sizes of the objects
 * handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)                         \
({      typeof(variable) pscr_ret__;                                    \
        __verify_pcpu_ptr(&(variable));                                 \
        switch(sizeof(variable)) {                                      \
        case 1: pscr_ret__ = stem##1(variable);break;                   \
        case 2: pscr_ret__ = stem##2(variable);break;                   \
        case 4: pscr_ret__ = stem##4(variable);break;                   \
        case 8: pscr_ret__ = stem##8(variable);break;                   \
        default:                                                        \
                __bad_size_call_parameter();break;                      \
        }                                                               \
        pscr_ret__;                                                     \
})
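
/*
 * For example (illustrative): given DEFINE_PER_CPU(int, x), the call
 * this_cpu_read(x) dispatches on sizeof(int) == 4 and evaluates to
 * this_cpu_read_4(x).  An unsupported size reaches
 * __bad_size_call_parameter(), which is deliberately never defined, so
 * such a call fails at link time while supported sizes compile the
 * call away entirely.
 */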

#define __pcpu_size_call_return2(stem, variable, ...)                   \
({                                                                      \
        typeof(variable) pscr2_ret__;                                   \
        __verify_pcpu_ptr(&(variable));                                 \
        switch(sizeof(variable)) {                                      \
        case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;    \
        case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;    \
        case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;    \
        case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;    \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
        pscr2_ret__;                                                    \
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 * We enforce this on all architectures even if they don't support
 * a double cmpxchg instruction, since it's a cheap requirement, and it
 * avoids breaking the requirement for architectures with the instruction.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)           \
({                                                                      \
        bool pdcrb_ret__;                                               \
        __verify_pcpu_ptr(&pcp1);                                       \
        BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                     \
        VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));         \
        VM_BUG_ON((unsigned long)(&pcp2) !=                             \
                  (unsigned long)(&pcp1) + sizeof(pcp1));               \
        switch(sizeof(pcp1)) {                                          \
        case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;  \
        case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;  \
        case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;  \
        case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;  \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
        pdcrb_ret__;                                                    \
})
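
/*
 * Illustrative declaration (hypothetical names) satisfying the layout
 * checked above: two adjacent scalars of equal size, with the pair
 * aligned to twice the scalar size.
 *
 *	struct pcp_pair {
 *		long	first;
 *		long	second;
 *	} __aligned(2 * sizeof(long));
 *
 *	DEFINE_PER_CPU(struct pcp_pair, my_pair);
 */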

#define __pcpu_size_call(stem, variable, ...)                           \
do {                                                                    \
        __verify_pcpu_ptr(&(variable));                                 \
        switch(sizeof(variable)) {                                      \
                case 1: stem##1(variable, __VA_ARGS__);break;           \
                case 2: stem##2(variable, __VA_ARGS__);break;           \
                case 4: stem##4(variable, __VA_ARGS__);break;           \
                case 8: stem##8(variable, __VA_ARGS__);break;           \
                default:                                                \
                        __bad_size_call_parameter();break;              \
        }                                                               \
} while (0)

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access against other
 * operations on the *same* processor.  The assumption is that per cpu
 * data is only accessed by a single processor instance (the current
 * one).
 *
 * This first group (this_cpu_*) is used for accesses that must be done
 * in a preemption-safe way, since the calling context is not known to
 * be preempt safe.  Interrupts may still occur; if an interrupt
 * modifies the variable too, then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely, e.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes, e.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions.  If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */

#define _this_cpu_generic_read(pcp)                                     \
({      typeof(pcp) ret__;                                              \
        preempt_disable();                                              \
        ret__ = *this_cpu_ptr(&(pcp));                                  \
        preempt_enable();                                               \
        ret__;                                                          \
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)  _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)  _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)  _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)  _this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)     __pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)                           \
do {                                                                    \
        unsigned long flags;                                            \
        raw_local_irq_save(flags);                                      \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
        raw_local_irq_restore(flags);                                   \
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)       __pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)         __pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)         this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)              this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)              this_cpu_sub((pcp), 1)
#endif
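
/*
 * Usage sketch (illustrative; the variable is hypothetical): a
 * statistics counter that may be updated from both process and
 * interrupt context.  this_cpu_inc() is a preemption- and irq-safe
 * RMW on the local copy (the generic fallback above disables
 * interrupts around the update), so no lock is needed.
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	void note_event(void)
 *	{
 *		this_cpu_inc(nr_events);
 *	}
 */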

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)         __pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)          __pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)         __pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

#define _this_cpu_generic_add_return(pcp, val)                          \
({                                                                      \
        typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
        raw_local_irq_save(flags);                                      \
        __this_cpu_add(pcp, val);                                       \
        ret__ = __this_cpu_read(pcp);                                   \
        raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)       _this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)       _this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)       _this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)       _this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)  __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)   this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)        this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)        this_cpu_add_return(pcp, -1)
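
/*
 * Usage sketch (illustrative; names are hypothetical): the *_return
 * forms perform the update and read back the result as one irq-safe
 * step, e.g. to detect when a per-cpu batch counter crosses a
 * threshold.
 *
 *	DEFINE_PER_CPU(unsigned int, batch);
 *
 *	void account_one(void)
 *	{
 *		unsigned int n;
 *
 *		n = this_cpu_inc_return(batch);
 *		if (n % BATCH_MAX == 0)
 *			flush_batch();
 *	}
 */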

#define _this_cpu_generic_xchg(pcp, nval)                               \
({      typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
        raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_read(pcp);                                   \
        __this_cpu_write(pcp, nval);                                    \
        raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)    _this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)       \
        __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)                      \
({                                                                      \
        typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
        raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_read(pcp);                                   \
        if (ret__ == (oval))                                            \
                __this_cpu_write(pcp, nval);                            \
        raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)   _this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)      \
        __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
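
/*
 * Usage sketch (illustrative; the variable is hypothetical): the
 * classic cmpxchg retry loop, here keeping a per-cpu maximum.
 * this_cpu_cmpxchg() returns the value it found; the store happened
 * iff that equals the expected old value.
 *
 *	DEFINE_PER_CPU(unsigned long, max_seen);
 *
 *	void update_max(unsigned long val)
 *	{
 *		unsigned long old = this_cpu_read(max_seen);
 *
 *		while (old < val) {
 *			unsigned long prev;
 *
 *			prev = this_cpu_cmpxchg(max_seen, old, val);
 *			if (prev == old)
 *				break;		(exchange succeeded)
 *			old = prev;
 *		}
 *	}
 */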

/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)        \
({                                                                      \
        int ret__;                                                      \
        unsigned long flags;                                            \
        raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
                        oval1, oval2, nval1, nval2);                    \
        raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
        _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)        \
        __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
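
/*
 * Usage sketch (illustrative; builds on the hypothetical struct
 * pcp_pair from the alignment example above): atomically replace both
 * adjacent words iff both still hold the expected values.
 *
 *	if (this_cpu_cmpxchg_double(my_pair.first, my_pair.second,
 *				    old1, old2, new1, new2))
 *		... (both words were replaced atomically) ...
 */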

/*
 * Generic percpu operations for contexts that are already safe from
 * preemption/interrupts.  Either we do not care about races or the
 * caller has the responsibility of handling preemption/interrupt
 * issues.  Arch code can still override these instructions since the
 * arch per cpu code may be more efficient and may actually get race
 * freeness for free (that is the case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show
 * unexpected behavior because the execution thread was rescheduled on
 * another processor or an interrupt occurred and the same percpu
 * variable was modified from the interrupt context.
 */
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)   __pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)                          \
do {                                                                    \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)     __pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)       __pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)       __this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)            __this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)            __this_cpu_sub((pcp), 1)
#endif
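
/*
 * Usage sketch (illustrative; names are hypothetical): __this_cpu_*
 * operations are only safe when the caller already excludes
 * preemption and, if the variable is also touched from irq context,
 * interrupts.
 *
 *	DEFINE_PER_CPU(unsigned long, scratch);
 *
 *	void tally(unsigned long n)
 *	{
 *		preempt_disable();
 *		__this_cpu_add(scratch, n);	(no irq protection needed
 *						 if no irq user exists)
 *		preempt_enable();
 *	}
 */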

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)       __pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)        __pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)       __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

#define __this_cpu_generic_add_return(pcp, val)                         \
({                                                                      \
        __this_cpu_add(pcp, val);                                       \
        __this_cpu_read(pcp);                                           \
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
#  define __this_cpu_add_return_1(pcp, val)     __this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
#  define __this_cpu_add_return_2(pcp, val)     __this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
#  define __this_cpu_add_return_4(pcp, val)     __this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
#  define __this_cpu_add_return_8(pcp, val)     __this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)        \
        __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)      __this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)      __this_cpu_add_return(pcp, -1)

#define __this_cpu_generic_xchg(pcp, nval)                              \
({      typeof(pcp) ret__;                                              \
        ret__ = __this_cpu_read(pcp);                                   \
        __this_cpu_write(pcp, nval);                                    \
        ret__;                                                          \
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
#  define __this_cpu_xchg_1(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
#  define __this_cpu_xchg_2(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
#  define __this_cpu_xchg_4(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
#  define __this_cpu_xchg_8(pcp, nval)  __this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)     \
        __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)                     \
({                                                                      \
        typeof(pcp) ret__;                                              \
        ret__ = __this_cpu_read(pcp);                                   \
        if (ret__ == (oval))                                            \
                __this_cpu_write(pcp, nval);                            \
        ret__;                                                          \
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
#  define __this_cpu_cmpxchg_1(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
#  define __this_cpu_cmpxchg_2(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
#  define __this_cpu_cmpxchg_4(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
#  define __this_cpu_cmpxchg_8(pcp, oval, nval) __this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)    \
        __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)       \
({                                                                      \
        int __ret = 0;                                                  \
        if (__this_cpu_read(pcp1) == (oval1) &&                         \
            __this_cpu_read(pcp2) == (oval2)) {                         \
                __this_cpu_write(pcp1, (nval1));                        \
                __this_cpu_write(pcp2, (nval2));                        \
                __ret = 1;                                              \
        }                                                               \
        (__ret);                                                        \
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)   \
        __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)      \
        __pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif

#endif /* __LINUX_PERCPU_H */