linux/kernel/rcutiny.c
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .donetail       = &rcu_bh_ctrlblk.rcucblist,
        .curtail        = &rcu_bh_ctrlblk.rcucblist,
};
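
/*
 * A sketch of the list states, assuming two hypothetical queued callbacks
 * A and B with A's grace period already complete:
 *
 *      rcucblist -> A -> B -> NULL
 *      donetail == &A->next    (A is ready to invoke)
 *      curtail  == &B->next    (B still awaits a grace period)
 *
 * When the list is empty, both tail pointers point back at ->rcucblist,
 * as in the static initializers above.
 */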

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/* Forward declarations for rcutiny_plugin.h. */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
        if (--rcu_dynticks_nesting == 0)
                rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
        rcu_dynticks_nesting++;
}
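
/*
 * A minimal usage sketch, assuming a hypothetical caller in an
 * architecture's idle loop (not from this file):
 *
 *      rcu_enter_nohz();  (nesting drops to zero: extended QS begins)
 *      ...sleep with the scheduling-clock tick stopped...
 *      rcu_exit_nohz();   (extended QS ends: RCU watches this CPU again)
 */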

#endif /* #ifdef CONFIG_NO_HZ */

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also disable irqs to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                local_irq_restore(flags);
                return 1;
        }
        local_irq_restore(flags);

        return 0;
}
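
/*
 * For example, in terms of the two-callback sketch near the top of this
 * file: a quiescent state advances ->donetail to ->curtail, so both A
 * and B become "done" and await rcu_process_callbacks().
 */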

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short-circuiting,
 * so that both helpers are always invoked.
 */
void rcu_sched_qs(int cpu)
{
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
        /*
         * An interrupt taken from user mode, or from the idle loop with
         * neither a softirq nor a nested hardirq active (the
         * hardirq_count() test permits only this interrupt itself), is a
         * quiescent state for rcu_sched.
         */
        if (user ||
            (idle_cpu(cpu) &&
             !in_softirq() &&
             hardirq_count() <= (1 << HARDIRQ_SHIFT)))
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu); /* Outside softirq: rcu_bh quiescent state. */
        rcu_preempt_check_callbacks();
}

/*
 * Helper function for rcu_process_callbacks() that operates on the
 * specified rcu_ctrlblk structure.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        struct rcu_head *next, *list;
        unsigned long flags;

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail)
                return;

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist; /* List is now empty. */
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                list->func(list);
                list = next;
        }
}

/*
 * Invoke any callbacks whose grace period has completed.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
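
/*
 * A minimal update-side usage sketch (assuming hypothetical variables
 * global_ptr, newp, and oldp; not from this file):
 *
 *      oldp = global_ptr;
 *      rcu_assign_pointer(global_ptr, newp);  (publish the new version)
 *      synchronize_sched();                   (wait out preexisting readers)
 *      kfree(oldp);                           (now safe to free the old one)
 */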

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        /*
         * Append at ->curtail with irqs disabled so that a call_rcu()
         * from an interrupt handler cannot corrupt the list.
         */
        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
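
/*
 * A minimal usage sketch (assuming a hypothetical struct foo and reclaim
 * function; not from this file): the callback typically recovers its
 * enclosing structure and frees it:
 *
 *      struct foo { int data; struct rcu_head rcu; };
 *
 *      static void foo_reclaim(struct rcu_head *rp)
 *      {
 *              kfree(container_of(rp, struct foo, rcu));
 *      }
 *
 *      call_rcu_sched(&fp->rcu, foo_reclaim);  (fp freed after a GP)
 */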

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

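/*
 * Wait for all rcu_bh callbacks queued before this call to be invoked:
 * post a callback of our own and wait for it via a completion, relying
 * on the fact that callbacks on a given list run in queue order.
 */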
void rcu_barrier_bh(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finishes. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

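/*
 * Wait for all rcu_sched callbacks queued before this call to be
 * invoked, using the same post-and-wait approach as rcu_barrier_bh().
 */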
void rcu_barrier_sched(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finishes. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

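/*
 * Boot-time initialization: register the softirq handler that invokes
 * RCU callbacks whose grace period has completed.
 */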
void __init rcu_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}