linux/kernel/rcupdate.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>

enum rcu_barrier {
        RCU_BARRIER_STD,
        RCU_BARRIER_BH,
        RCU_BARRIER_SCHED,
};

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
int rcu_scheduler_active __read_mostly;

static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
static struct rcu_head rcu_migrate_head[3];
static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (rcu_blocking_is_gp())
                return;

        init_completion(&rcu.completion);
        /* Will wake me up once the grace period has elapsed. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
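
/*
 * Usage sketch (illustrative only; the struct, lock, and function names
 * below are hypothetical, not part of this file): a typical updater
 * unlinks an element with list_del_rcu(), waits for a grace period with
 * synchronize_rcu(), and only then frees the element, so that readers
 * traversing the list under rcu_read_lock() never touch freed memory.
 *
 *      struct foo {
 *              struct list_head list;
 *              int data;
 *      };
 *
 *      void remove_foo(struct foo *p)
 *      {
 *              spin_lock(&foo_lock);           // serialize updaters
 *              list_del_rcu(&p->list);         // readers may still see p
 *              spin_unlock(&foo_lock);
 *              synchronize_rcu();              // wait out pre-existing readers
 *              kfree(p);                       // no reader can now hold p
 *      }
 */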

static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
        int cpu = smp_processor_id();
        struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

        atomic_inc(&rcu_barrier_cpu_count);
        switch ((enum rcu_barrier)type) {
        case RCU_BARRIER_STD:
                call_rcu(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_BH:
                call_rcu_bh(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_SCHED:
                call_rcu_sched(head, rcu_barrier_callback);
                break;
        }
}

static inline void wait_migrated_callbacks(void)
{
        wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(enum rcu_barrier type)
{
        BUG_ON(in_interrupt());
        /* Take cpucontrol mutex to protect against CPU hotplug */
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        /*
         * Initialize rcu_barrier_cpu_count to 1, then invoke
         * rcu_barrier_func() on each CPU, so that each CPU also has
         * incremented rcu_barrier_cpu_count.  Only then is it safe to
         * decrement rcu_barrier_cpu_count -- otherwise the first CPU
         * might complete its grace period before all of the other CPUs
         * did their increment, causing this function to return too
         * early.
         */
        atomic_set(&rcu_barrier_cpu_count, 1);
        on_each_cpu(rcu_barrier_func, (void *)type, 1);
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
        wait_migrated_callbacks();
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(RCU_BARRIER_STD);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
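
/*
 * Usage sketch (illustrative; foo_exit() and unregister_foo() are
 * hypothetical): a module that posts call_rcu() callbacks must not be
 * unloaded while any of them are still pending, because the callback
 * functions live in module text.  synchronize_rcu() only waits for a
 * grace period; rcu_barrier() waits for the callbacks themselves:
 *
 *      static void __exit foo_exit(void)
 *      {
 *              unregister_foo();       // stop posting new callbacks
 *              rcu_barrier();          // wait for outstanding callbacks
 *      }
 *      module_exit(foo_exit);
 */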

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
        _rcu_barrier(RCU_BARRIER_BH);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
        _rcu_barrier(RCU_BARRIER_SCHED);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
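
/*
 * Usage sketch (illustrative, hypothetical names): read sides that rely
 * on preemption being disabled rather than on rcu_read_lock() pair with
 * call_rcu_sched() on the update side, so their callbacks must be
 * drained with rcu_barrier_sched(), not rcu_barrier():
 *
 *      // reader
 *      preempt_disable();
 *      p = rcu_dereference(gp);
 *      do_something_with(p);
 *      preempt_enable();
 *
 *      // updater teardown
 *      call_rcu_sched(&old->rcu, free_old);
 *      ...
 *      rcu_barrier_sched();    // all call_rcu_sched() callbacks have run
 */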

static void rcu_migrate_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_migrate_type_count))
                wake_up(&rcu_migrate_wq);
}

static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
                unsigned long action, void *hcpu)
{
        if (action == CPU_DYING) {
                /*
                 * preempt_disable() in on_each_cpu() prevents stop_machine(),
                 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
                 * returns, all online CPUs have queued rcu_barrier_func(),
                 * and the dying CPU (if any) has queued its
                 * rcu_migrate_callback()s.
                 *
                 * These callbacks ensure _rcu_barrier() waits for all
                 * RCU callbacks of the specified type to complete.
                 */
                atomic_set(&rcu_migrate_type_count, 3);
                call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
                call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
                call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
        } else if (action == CPU_POST_DEAD) {
                /* rcu_migrate_head is protected by cpu_add_remove_lock */
                wait_migrated_callbacks();
        }

        return NOTIFY_OK;
}

void __init rcu_init(void)
{
        __rcu_init();
        hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
}

void rcu_scheduler_starting(void)
{
        WARN_ON(num_online_cpus() != 1);
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}