linux/include/linux/rcupdate.h
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
                                      struct rcu_head *rhp,
                                      unsigned long secs,
                                      unsigned long c_old,
                                      unsigned long c);
#else
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
                                      struct rcu_head *rhp,
                                      unsigned long secs,
                                      unsigned long c_old,
                                      unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)       (UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)       (UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

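/*
 * These comparisons are wraparound-safe: they treat the unsigned space
 * as circular, so they give the intuitive answer even after a counter
 * wraps.  A sketch, with hypothetical grace-period numbers:
 *
 *      new = 2, old = ULONG_MAX - 1:
 *      ULONG_CMP_GE(new, old) is true  (new - old wraps to 4)
 *      ULONG_CMP_LT(old, new) is true  (old - new wraps to a huge value)
 */
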
/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
extern void call_rcu(struct rcu_head *head,
                              void (*func)(struct rcu_head *head));

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define call_rcu        call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

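/*
 * A minimal usage sketch for call_rcu(), assuming a hypothetical
 * "struct foo" that is linked into an RCU-protected list and embeds
 * the rcu_head used for queueing.  foo_remove() runs with the
 * update-side lock held; foo_reclaim() runs after a grace period:
 *
 *      struct foo {
 *              struct list_head list;
 *              int a;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rhp)
 *      {
 *              struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *              kfree(fp);
 *      }
 *
 *      static void foo_remove(struct foo *fp)
 *      {
 *              list_del_rcu(&fp->list);
 *              call_rcu(&fp->rcu, foo_reclaim);
 *      }
 */
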
/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
extern void call_rcu_bh(struct rcu_head *head,
                        void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(),
 *  OR
 *  anything that disables preemption.
 *  These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
extern void call_rcu_sched(struct rcu_head *head,
                           void (*func)(struct rcu_head *rcu));

extern void synchronize_sched(void);

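/*
 * A minimal sketch of the RCU-sched flavor in use, assuming a
 * hypothetical RCU-protected global pointer "gp" whose updates are
 * guarded by a hypothetical "gp_lock".  Readers rely on disabled
 * preemption; the updater waits for them with synchronize_sched():
 *
 *      rcu_read_lock_sched();
 *      p = rcu_dereference_sched(gp);
 *      if (p != NULL)
 *              do_something_with(p);
 *      rcu_read_unlock_sched();
 *
 * and, with gp_lock held on the update side:
 *
 *      old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *      rcu_assign_pointer(gp, new);
 *      synchronize_sched();
 *      kfree(old);
 */
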
#ifdef CONFIG_PREEMPT_RCU

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
extern void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
        preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
        preempt_enable();
}

static inline void synchronize_rcu(void)
{
        synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
        return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
extern void rcu_idle_enter(void);
extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);

#ifdef CONFIG_RCU_USER_QS
extern void rcu_user_enter(void);
extern void rcu_user_exit(void);
extern void rcu_user_enter_after_irq(void);
extern void rcu_user_exit_after_irq(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_enter_after_irq(void) { }
static inline void rcu_user_exit_after_irq(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
                                         struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */

extern void exit_rcu(void);

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
        do { \
                rcu_irq_enter(); \
                do { a; } while (0); \
                rcu_irq_exit(); \
        } while (0)

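/*
 * For example, a tracepoint in the inner idle loop can be made safe
 * for RCU by wrapping it as follows (a sketch, with a hypothetical
 * trace_idle_event() tracepoint):
 *
 *      RCU_NONIDLE(trace_idle_event(cpu));
 *
 * RCU then watches this CPU only for the duration of the wrapped
 * statement.
 */
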
/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

typedef void call_rcu_func_t(struct rcu_head *head,
                             void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
extern void init_rcu_head_on_stack(struct rcu_head *head);
extern void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
extern int rcu_is_cpu_idle(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
        return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
        lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
        lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
static inline int rcu_read_lock_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (rcu_is_cpu_idle())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return lock_is_held(&rcu_lock_map);
}

/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering them to be in an extended
 * quiescent state, so such a CPU is effectively never in an RCU read-side
 * critical section regardless of what RCU primitives it invokes.  This
 * state of affairs is required --- we need to keep an RCU-free window in
 * idle where the CPU may possibly enter into low power mode.  This way we
 * can report an extended quiescent state to CPUs that have started a
 * grace period.  Otherwise we would delay any grace period for as long as
 * we run in the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
        int lockdep_opinion = 0;

        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (rcu_is_cpu_idle())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        if (debug_locks)
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
        return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
        return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)            do { } while (0)
# define rcu_lock_release(a)            do { } while (0)

static inline int rcu_read_lock_held(void)
{
        return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
        return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
        return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
        return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

extern int rcu_my_thread_group_empty(void);

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)                                        \
        do {                                                            \
                static bool __section(.data.unlikely) __warned;         \
                if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
                        __warned = true;                                \
                        lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
                }                                                       \
        } while (0)

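/*
 * A typical use of rcu_lockdep_assert() is to back up a documented
 * locking requirement, for example (a sketch, with a hypothetical
 * foo_lock guarding updates):
 *
 *      rcu_lockdep_assert(rcu_read_lock_held() ||
 *                         lockdep_is_held(&foo_lock),
 *                         "need rcu_read_lock() or foo_lock");
 */
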
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
                           "Illegal context switch in RCU read-side critical section");
}
#else /* #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) */

#define rcu_sleep_check()                                               \
        do {                                                            \
                rcu_preempt_sleep_check();                              \
                rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),     \
                                   "Illegal context switch in RCU-bh"   \
                                   " read-side critical section");      \
                rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),  \
                                   "Illegal context switch in RCU-sched"\
                                   " read-side critical section");      \
        } while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
        ((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
        ({ \
                typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
                rcu_dereference_sparse(p, space); \
                ((typeof(*p) __force __kernel *)(_________p1)); \
        })
#define __rcu_dereference_check(p, c, space) \
        ({ \
                typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
                rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
                                      " usage"); \
                rcu_dereference_sparse(p, space); \
                smp_read_barrier_depends(); \
                ((typeof(*p) __force __kernel *)(_________p1)); \
        })
#define __rcu_dereference_protected(p, c, space) \
        ({ \
                rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
                                      " usage"); \
                rcu_dereference_sparse(p, space); \
                ((typeof(*p) __force __kernel *)(p)); \
        })

#define __rcu_access_index(p, space) \
        ({ \
                typeof(p) _________p1 = ACCESS_ONCE(p); \
                rcu_dereference_sparse(p, space); \
                (_________p1); \
        })
#define __rcu_dereference_index_check(p, c) \
        ({ \
                typeof(p) _________p1 = ACCESS_ONCE(p); \
                rcu_lockdep_assert(c, \
                                   "suspicious rcu_dereference_index_check()" \
                                   " usage"); \
                smp_read_barrier_depends(); \
                (_________p1); \
        })
#define __rcu_assign_pointer(p, v, space) \
        do { \
                smp_wmb(); \
                (p) = (typeof(*v) __force space *)(v); \
        } while (0)


/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)

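/*
 * For example, testing a hypothetical RCU-protected global pointer
 * "gp" against NULL without entering a read-side critical section:
 *
 *      if (rcu_access_pointer(gp) == NULL)
 *              return -ENOENT;
 */
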
/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *                                            atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
        __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
        __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
        __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
                                __rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/**
 * rcu_access_index() - fetch RCU index with no dereferencing
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1.  Although rcu_access_index() may also be used in cases where
 * update-side locks prevent the value of the index from changing, you
 * should instead use rcu_dereference_index_protected() for this use case.
 */
#define rcu_access_index(p) __rcu_access_index((p), __rcu)

/**
 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Similar to rcu_dereference_check(), but omits the sparse checking.
 * This allows rcu_dereference_index_check() to be used on integers,
 * which can then be used as array indices.  Attempting to use
 * rcu_dereference_check() on an integer will give compiler warnings
 * because the sparse address-space mechanism relies on dereferencing
 * the RCU-protected pointer.  Dereferencing integers is not something
 * that even gcc will put up with.
 *
 * Note that this function does not implicitly check for RCU read-side
 * critical sections.  If this function gains lots of uses, it might
 * make sense to provide versions for each flavor of RCU, but it does
 * not make sense as of early 2010.
 */
#define rcu_dereference_index_check(p, c) \
        __rcu_dereference_index_check((p), (c))

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
        __rcu_dereference_protected((p), (c), __rcu)

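/*
 * For example, an updater already holding a hypothetical "gp_lock"
 * that guards all updates to "gp" can fetch the pointer with:
 *
 *      p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 */
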

/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
 * is illegal to block while in an RCU read-side critical section.  In
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (with -rt patchset) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
 */
static inline void rcu_read_lock(void)
{
        __rcu_read_lock();
        __acquire(RCU);
        rcu_lock_acquire(&rcu_lock_map);
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_unlock() used illegally while idle");
        rcu_lock_release(&rcu_lock_map);
        __release(RCU);
        __rcu_read_unlock();
}

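/*
 * A minimal read-side sketch, again assuming a hypothetical
 * RCU-protected global pointer "gp":
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p != NULL)
 *              do_something_with(p->a);
 *      rcu_read_unlock();
 *
 * The pointer returned by rcu_dereference() must not be dereferenced
 * after the matching rcu_read_unlock().
 */
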
/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but is to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in an RCU read-side
 * critical section must be protected by disabling softirqs. Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
        local_bh_disable();
        __acquire(RCU_BH);
        rcu_lock_acquire(&rcu_bh_lock_map);
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_unlock_bh() used illegally while idle");
        rcu_lock_release(&rcu_bh_lock_map);
        __release(RCU_BH);
        local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but is to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
        preempt_disable();
        __acquire(RCU_SCHED);
        rcu_lock_acquire(&rcu_sched_lock_map);
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
        preempt_disable_notrace();
        __acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_unlock_sched() used illegally while idle");
        rcu_lock_release(&rcu_sched_lock_map);
        __release(RCU_SCHED);
        preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
        __release(RCU_SCHED);
        preempt_enable_notrace();
}

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 */
#define rcu_assign_pointer(p, v) \
        __rcu_assign_pointer((p), (v), __rcu)

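/*
 * A minimal publish sketch, assuming a hypothetical global
 * "struct foo __rcu *gp":
 *
 *      p = kmalloc(sizeof(*p), GFP_KERNEL);
 *      if (p == NULL)
 *              return -ENOMEM;
 *      p->a = 1;
 *      rcu_assign_pointer(gp, p);
 *
 * A reader whose rcu_dereference(gp) returns the new pointer is then
 * guaranteed to see p->a == 1.
 */
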
/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.   This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.   The caller has taken whatever steps are required to prevent
 *      RCU readers from concurrently accessing this pointer -or-
 * 3.   The referenced data structure has already been exposed to
 *      readers either at compile time or via rcu_assign_pointer() -and-
 *      a.      You have not made -any- reader-visible changes to
 *              this structure since then -or-
 *      b.      It is OK for readers accessing this structure from its
 *              new location to see the old state of the structure.  (For
 *              example, the changes were to statistical counters or to
 *              other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the structures
 * will look OK in crash dumps, but any concurrent RCU readers might
 * see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 */
#define RCU_INIT_POINTER(p, v) \
        do { \
                p = (typeof(*v) __force __rcu *)(v); \
        } while (0)

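/*
 * For example, NULLing out a hypothetical RCU-protected pointer "gp"
 * requires no ordering (case 1 above):
 *
 *      RCU_INIT_POINTER(gp, NULL);
 */
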
/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
                .p = (typeof(*v) __force __rcu *)(v)

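/*
 * For example, statically initializing a structure whose "bar" field
 * is an RCU-protected pointer (a sketch, with hypothetical types):
 *
 *      static struct foo my_foo = {
 *              RCU_POINTER_INITIALIZER(bar, &default_bar),
 *      };
 */
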
/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
        do { \
                BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
                kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
        } while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:        pointer to kfree
 * @rcu_head:   the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head)                                        \
        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))

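/*
 * Given the hypothetical "struct foo" from the call_rcu() example
 * above, whose rcu_head member is named "rcu", the remove path
 * becomes simply:
 *
 *      list_del_rcu(&fp->list);
 *      kfree_rcu(fp, rcu);
 *
 * which replaces the separate foo_reclaim() callback and the
 * call_rcu() invocation.
 */
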
#endif /* __LINUX_RCUPDATE_H */