linux/kernel/rcupdate.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>

/* Which flavor of RCU callback rcu_barrier_func() should post. */
enum rcu_barrier {
        RCU_BARRIER_STD,
        RCU_BARRIER_BH,
        RCU_BARRIER_SCHED,
};

/* Per-CPU callback posted by rcu_barrier_func(). */
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
/* Number of barrier callbacks still outstanding, plus one (see _rcu_barrier()). */
static atomic_t rcu_barrier_cpu_count;
/* Serializes concurrent rcu_barrier*() callers. */
static DEFINE_MUTEX(rcu_barrier_mutex);
/* Signaled when rcu_barrier_cpu_count drops to zero. */
static struct completion rcu_barrier_completion;

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

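/*
 * For reference, struct rcu_synchronize lives in
 * include/linux/rcupdate.h; a minimal sketch of its layout
 * (assuming the definition of this era) pairs the callback head
 * with the completion that wakeme_after_rcu() signals:
 *
 *        struct rcu_synchronize {
 *                struct rcu_head head;
 *                struct completion completion;
 *        };
 *
 * container_of() recovers the enclosing structure from the
 * rcu_head pointer that the RCU core passes to the callback.
 */
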
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void);     /* Makes kernel-doc tools happy */
synchronize_rcu_xxx(synchronize_rcu, call_rcu)
EXPORT_SYMBOL_GPL(synchronize_rcu);
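
/*
 * The synchronize_rcu_xxx() invocation above is a macro from
 * include/linux/rcupdate.h.  Sketching the expansion (the exact
 * macro text may differ), synchronize_rcu() becomes roughly:
 *
 *        void synchronize_rcu(void)
 *        {
 *                struct rcu_synchronize rcu;
 *
 *                init_completion(&rcu.completion);
 *                call_rcu(&rcu.head, wakeme_after_rcu);
 *                wait_for_completion(&rcu.completion);
 *        }
 *
 * That is: post a callback for the end of the next grace period,
 * then block until wakeme_after_rcu() completes the completion.
 */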
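
/*
 * Typical update-side usage, as a hedged sketch rather than code
 * from this file (struct foo, gp, and new_fp are hypothetical):
 *
 *        struct foo *old = gp;
 *
 *        rcu_assign_pointer(gp, new_fp);
 *        synchronize_rcu();
 *        kfree(old);
 *
 * Once synchronize_rcu() returns, every reader that might have seen
 * the old pointer has left its rcu_read_lock()-delimited critical
 * section, so the old structure may safely be freed.  Readers fetch
 * the pointer with rcu_dereference().
 */
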
/*
 * Barrier callback: the last one to run signals _rcu_barrier()
 * that every CPU's callback has been invoked.
 */
static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
        int cpu = smp_processor_id();
        struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

        atomic_inc(&rcu_barrier_cpu_count);
        switch ((enum rcu_barrier)type) {
        case RCU_BARRIER_STD:
                call_rcu(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_BH:
                call_rcu_bh(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_SCHED:
                call_rcu_sched(head, rcu_barrier_callback);
                break;
        }
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(enum rcu_barrier type)
{
        BUG_ON(in_interrupt());
        /* Take rcu_barrier_mutex to serialize callers and protect against CPU hotplug. */
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        /*
         * Initialize rcu_barrier_cpu_count to 1, then invoke
         * rcu_barrier_func() on each CPU, so that each CPU also has
         * incremented rcu_barrier_cpu_count.  Only then is it safe to
         * decrement rcu_barrier_cpu_count -- otherwise the first CPU
         * might complete its grace period before all of the other CPUs
         * did their increment, causing this function to return too
         * early.
         */
        atomic_set(&rcu_barrier_cpu_count, 1);
        on_each_cpu(rcu_barrier_func, (void *)type, 1);
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
}

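/*
 * Worked example of the counting scheme above: on a two-CPU system
 * rcu_barrier_cpu_count starts at 1, and the two rcu_barrier_func()
 * invocations raise it to 3.  _rcu_barrier()'s own decrement then
 * leaves 2, and the two rcu_barrier_callback() invocations take it
 * 2 -> 1 -> 0, at which point the completion fires and the wait ends.
 */
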
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(RCU_BARRIER_STD);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

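/*
 * A common use of rcu_barrier(), as a sketch (my_exit, my_cache, and
 * unregister_my_stuff() are hypothetical): a module that posts
 * call_rcu() callbacks must wait for them all to run before its
 * callback code and data can be unloaded:
 *
 *        static void __exit my_exit(void)
 *        {
 *                unregister_my_stuff();
 *                rcu_barrier();
 *                kmem_cache_destroy(my_cache);
 *        }
 *
 * Unlike synchronize_rcu(), which waits only for a grace period,
 * rcu_barrier() waits for all previously posted callbacks to finish.
 */
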
/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
        _rcu_barrier(RCU_BARRIER_BH);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

 156
 157/**
 158 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 159 */
 160void rcu_barrier_sched(void)
 161{
 162        _rcu_barrier(RCU_BARRIER_SCHED);
 163}
 164EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 165
/* Defer to the RCU implementation's own init routine. */
void __init rcu_init(void)
{
        __rcu_init();
}