linux/kernel/rcuclassic.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif


/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_BITS_NONE,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_BITS_NONE,
};

static DEFINE_PER_CPU(struct rcu_data, rcu_data);
static DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent state counter.
 * The counter is somewhat degenerate: we do not need to know how many
 * quiescent states have passed, just whether there has been at least one
 * since the start of the grace period.  Thus it is really just a flag.
 */
void rcu_qsctr_inc(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        rdp->passed_quiesc = 1;
}

void rcu_bh_qsctr_inc(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
        rdp->passed_quiesc = 1;
}

static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;
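
/*
 * Tunables (also exposed as module parameters at the bottom of this file):
 * blimit is the default number of callbacks invoked per rcu_do_batch() pass,
 * qhimark is the per-CPU callback-queue length above which the batch limit
 * is lifted and a quiescent state is forced, and qlowmark is the queue
 * length below which the default batch limit is restored.
 */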

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        unsigned long flags;

        set_need_resched();
        spin_lock_irqsave(&rcp->lock, flags);
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 *
                 * cpu_online_mask is updated by _cpu_down()
                 * using __stop_machine(). Since we're in an irqs-disabled
                 * section, __stop_machine() is not executing, hence
                 * the cpu_online_mask is stable.
                 *
                 * However, a cpu might have been offlined _just_ before
                 * we disabled irqs while entering here.
                 * And the rcu subsystem might not yet have handled the
                 * CPU_DEAD notification, leading to the offlined cpu's bit
                 * being set in the rcp->cpumask.
                 *
                 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
                 * sending smp_send_reschedule() to an offlined CPU.
                 */
                for_each_cpu_and(cpu,
                                  to_cpumask(rcp->cpumask), cpu_online_mask) {
                        if (cpu != rdp->cpu)
                                smp_send_reschedule(cpu);
                }
        }
        spin_unlock_irqrestore(&rcp->lock, flags);
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
                struct rcu_data *rdp)
{
        long batch;

        head->next = NULL;
        smp_mb(); /* Read of rcu->cur must happen after any change by caller. */

        /*
         * Determine the batch number of this callback.
         *
         * Using ACCESS_ONCE to avoid the following error when gcc eliminates
         * the local variable "batch" and emits code like this:
         *      1) rdp->batch = rcp->cur + 1 # gets old value
         *      ......
         *      2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
         * then [*nxttail[0], *nxttail[1]) may contain callbacks whose
         * batch# == rdp->batch; see the comment of struct rcu_data.
         */
        batch = ACCESS_ONCE(rcp->cur) + 1;

        if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
                /* process callbacks */
                rdp->nxttail[0] = rdp->nxttail[1];
                rdp->nxttail[1] = rdp->nxttail[2];
                if (rcu_batch_after(batch - 1, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[2];
        }

        rdp->batch = batch;
        *rdp->nxttail[2] = head;
        rdp->nxttail[2] = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
        rcp->gp_start = jiffies;
        rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
{
        int cpu;
        long delta;
        unsigned long flags;

        /* Only let one CPU complain about others per time interval. */

        spin_lock_irqsave(&rcp->lock, flags);
        delta = jiffies - rcp->jiffies_stall;
        if (delta < 2 || rcp->cur != rcp->completed) {
                spin_unlock_irqrestore(&rcp->lock, flags);
                return;
        }
        rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rcp->lock, flags);

        /* OK, time to rat on our buddy... */

        printk(KERN_ERR "INFO: RCU detected CPU stalls:");
        for_each_possible_cpu(cpu) {
                if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
                        printk(" %d", cpu);
        }
        printk(" (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rcp->gp_start));
}

static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
                        smp_processor_id(), jiffies,
                        jiffies - rcp->gp_start);
        dump_stack();
        spin_lock_irqsave(&rcp->lock, flags);
        if ((long)(jiffies - rcp->jiffies_stall) >= 0)
                rcp->jiffies_stall =
                        jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rcp->lock, flags);
        set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
        long delta;

        delta = jiffies - rcp->jiffies_stall;
        if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
                delta >= 0) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rcp);

        } else if (rcp->cur != rcp->completed && delta >= 2) {

                /* They had two seconds to dump stack, so complain. */
                print_other_cpu_stall(rcp);
        }
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
}

static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        local_irq_save(flags);
        __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
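
/*
 * A minimal usage sketch (not part of this file's implementation): a typical
 * updater embeds a struct rcu_head in its own structure, unlinks the object
 * from all reader-visible data structures, and then hands it to call_rcu()
 * with a callback that frees it once the grace period has elapsed.  The names
 * "struct foo", "foo_reclaim" and "foo_release" below are hypothetical.
 */
#if 0   /* illustrative only */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *head)
{
        struct foo *fp = container_of(head, struct foo, rcu);

        kfree(fp);      /* no pre-existing reader can still reference fp */
}

static void foo_release(struct foo *fp)
{
        /* fp must already be unreachable from new RCU readers. */
        call_rcu(&fp->rcu, foo_reclaim);
}
#endif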

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context. These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        local_irq_save(flags);
        __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
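
/*
 * A minimal reader-side sketch for the _bh flavour (not part of this file):
 * process-context readers bracket their accesses with rcu_read_lock_bh() /
 * rcu_read_unlock_bh() so that softirq completion acts as the quiescent
 * state call_rcu_bh() waits for.  "struct foo" and "global_foo" are the same
 * hypothetical names as in the call_rcu() sketch above.
 */
#if 0   /* illustrative only */
static struct foo *global_foo;  /* published with rcu_assign_pointer() */

static int foo_lookup(void)
{
        struct foo *fp;
        int val = -1;

        rcu_read_lock_bh();
        fp = rcu_dereference(global_foo);
        if (fp)
                val = fp->data; /* fp stays valid until rcu_read_unlock_bh() */
        rcu_read_unlock_bh();

        return val;
}
#endif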

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU bh batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
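
/*
 * A sketch of how a test (rcutorture-style) might use these counters:
 * snapshot the completed count, wait for a grace period, and check that the
 * count has advanced.  Illustrative only; "check_one_gp" is a hypothetical
 * helper, not part of this file.
 */
#if 0   /* illustrative only */
static void check_one_gp(void)
{
        long snap = rcu_batches_completed();

        synchronize_rcu();      /* wait for a full grace period to elapse */
        WARN_ON(rcu_batches_completed() == snap);
}
#endif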

/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
        raise_softirq(RCU_SOFTIRQ);
}

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_save(flags);
        rdp->qlen -= count;
        local_irq_restore(flags);
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
 *   period (if necessary).
 */

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->cur != rcp->pending &&
                        rcp->completed == rcp->cur) {
                rcp->cur++;
                record_gp_stall_check_time(rcp);

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
                 * barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpumask_andnot(to_cpumask(rcp->cpumask),
                               cpu_online_mask, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * A cpu went through a quiescent state since the beginning of the grace
 * period. Clear it from the cpu mask and complete the grace period if it
 * was the last cpu. Start another grace period if someone has further
 * entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
        if (cpumask_empty(to_cpumask(rcp->cpumask))) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        unsigned long flags;

        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock_irqsave(&rcp->lock, flags);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock_irqrestore(&rcp->lock, flags);
}


#ifdef CONFIG_HOTPLUG_CPU

/*
 * Warning! Helper for rcu_offline_cpu. Do not use elsewhere without reviewing
 * the locking requirements: the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail, long batch)
{
        unsigned long flags;

        if (list) {
                local_irq_save(flags);
                this_rdp->batch = batch;
                *this_rdp->nxttail[2] = list;
                this_rdp->nxttail[2] = tail;
                local_irq_restore(flags);
        }
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        unsigned long flags;

        /*
         * If the cpu going offline owns the grace period, we can block
         * indefinitely waiting for it, so flush it here.
         */
        spin_lock_irqsave(&rcp->lock, flags);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
        spin_unlock(&rcp->lock);

        this_rdp->qlen += rdp->qlen;
        local_irq_restore(flags);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                                        &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                                        &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        unsigned long flags;
        long completed_snap;

        if (rdp->nxtlist) {
                local_irq_save(flags);
                completed_snap = ACCESS_ONCE(rcp->completed);

                /*
                 * move the other grace-period-completed entries to
                 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
                 */
                if (!rcu_batch_before(completed_snap, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
                else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
                        rdp->nxttail[0] = rdp->nxttail[1];

                /*
                 * the grace period for entries in
                 * [rdp->nxtlist, *rdp->nxttail[0]) has completed, so
                 * move these entries to donelist
                 */
                if (rdp->nxttail[0] != &rdp->nxtlist) {
                        *rdp->donetail = rdp->nxtlist;
                        rdp->donetail = rdp->nxttail[0];
                        rdp->nxtlist = *rdp->nxttail[0];
                        *rdp->donetail = NULL;

                        if (rdp->nxttail[1] == rdp->nxttail[0])
                                rdp->nxttail[1] = &rdp->nxtlist;
                        if (rdp->nxttail[2] == rdp->nxttail[0])
                                rdp->nxttail[2] = &rdp->nxtlist;
                        rdp->nxttail[0] = &rdp->nxtlist;
                }

                local_irq_restore(flags);

                if (rcu_batch_after(rdp->batch, rcp->pending)) {
                        unsigned long flags2;

                        /* and start it/schedule start if it's a new batch */
                        spin_lock_irqsave(&rcp->lock, flags2);
                        if (rcu_batch_after(rdp->batch, rcp->pending)) {
                                rcp->pending = rdp->batch;
                                rcu_start_batch(rcp);
                        }
                        spin_unlock_irqrestore(&rcp->lock, flags2);
                }
        }

        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        /*
         * Memory references from any prior RCU read-side critical sections
         * executed by the interrupted code must be seen before any RCU
         * grace-period manipulations below.
         */

        smp_mb(); /* See above block comment. */

        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));

        /*
         * Memory references from any later RCU read-side critical sections
         * executed by the interrupted code must be seen after any RCU
         * grace-period manipulations above.
         */

        smp_mb(); /* See above block comment. */
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* Check for CPU stalls, if enabled. */
        check_cpu_stall(rcp);

        if (rdp->nxtlist) {
                long completed_snap = ACCESS_ONCE(rcp->completed);

                /*
                 * This cpu has pending rcu entries and the grace period
                 * for them has completed.
                 */
                if (!rcu_batch_before(completed_snap, rdp->batch))
                        return 1;
                if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
                                rdp->nxttail[0] != rdp->nxttail[1])
                        return 1;
                if (rdp->nxttail[0] != &rdp->nxtlist)
                        return 1;

                /*
                 * This cpu has pending rcu entries and the new batch
                 * for them hasn't been started or scheduled to start.
                 */
                if (rcu_batch_after(rdp->batch, rcp->pending))
                        return 1;
        }

        /* This cpu has finished callbacks to invoke. */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu. */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

        return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}
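
/*
 * For orientation, a rough sketch of how the scheduler-clock path is expected
 * to drive this code (the real caller lives outside this file, in the timer
 * tick code): each tick checks rcu_pending() and, if there is work, reports
 * quiescent-state information via rcu_check_callbacks(), which in turn raises
 * RCU_SOFTIRQ so that rcu_process_callbacks() runs.
 */
#if 0   /* illustrative only */
static void tick_drives_rcu(int cpu, int user_tick)
{
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
}
#endif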

/*
 * Top-level function driving RCU grace-period detection, normally
 * invoked from the scheduler-clock interrupt.  This function simply
 * increments counters that are read only from softirq by this same
 * CPU, so there are no memory barriers required.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && rcu_scheduler_active &&
             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

                /*
                 * Get here if this CPU took its interrupt from user
                 * mode or from the idle loop, and if this is not a
                 * nested interrupt.  In this case, the CPU is in
                 * a quiescent state, so count it.
                 *
                 * Also do a memory barrier.  This is needed to handle
                 * the case where writes from a preempt-disable section
                 * of code get reordered into schedule() by this CPU's
                 * write buffer.  The memory barrier makes sure that
                 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
                 * by other CPUs to happen after any such write.
                 */

                smp_mb();  /* See above block comment. */
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);

        } else if (!in_softirq()) {

                /*
                 * Get here if this CPU did not take its interrupt from
                 * softirq, in other words, if it is not interrupting
                 * an rcu_bh read-side critical section.  This is an _bh
                 * critical section, so count it.  The memory barrier
                 * is needed for the same reason as is the above one.
                 */

                smp_mb();  /* See above block comment. */
                rcu_bh_qsctr_inc(cpu);
        }
        raise_rcu_softirq();
}

static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        unsigned long flags;

        spin_lock_irqsave(&rcp->lock, flags);
        memset(rdp, 0, sizeof(*rdp));
        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
        spin_unlock_irqrestore(&rcp->lock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or jiffy timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
        printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);