linux/kernel/rcutorture.c
/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval = 60;	/* Interval between stats, in seconds. */
				/*  Zero means "only at end of test". */
static bool verbose;		/* Print more debug info. */
static bool test_no_idle_hz = true;
				/* Test RCU support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff;		/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static int n_barrier_cbs;	/* Number of callbacks to test RCU barriers. */
static int onoff_interval;	/* Wait time between CPU hotplugs, 0=disable. */
static int onoff_holdoff;	/* Seconds after boot before CPU hotplugs. */
static int shutdown_secs;	/* Shutdown time (s).  <=0 for no shutdown. */
static int stall_cpu;		/* CPU-stall duration (s).  0 for no stall. */
static int stall_cpu_holdoff = 10; /* Time to wait until stall (s).  */
static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(n_barrier_cbs, int, 0444);
MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
module_param(onoff_interval, int, 0444);
MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(stall_cpu, int, 0444);
MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
module_param(stall_cpu_holdoff, int, 0444);
MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");

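/*
 * Example invocation (illustrative only; the parameter values here are
 * arbitrary, and any subset of the parameters above may be given):
 *
 *	modprobe rcutorture torture_type=rcu_bh nreaders=8 \
 *		stat_interval=30 verbose=1
 *
 * See Documentation/RCU/torture.txt for details of each parameter.
 */
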
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

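/*
 * Each element below cycles between the writer and the free pool.  Once
 * the writer retires an element, ->rtort_pipe_count tracks how many
 * grace periods it has passed through; the element is freed when the
 * count reaches RCU_TORTURE_PIPE_LEN.  ->rtort_mbtest is set to 1 while
 * the element is live and cleared just before it is freed, so a reader
 * observing zero has witnessed a memory-ordering failure.
 */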
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;
static long n_barrier_attempts;
static long n_barrier_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
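/*
 * Note that fullstop starts out as FULLSTOP_RMMOD: the module-init code
 * (not included in this listing) presumably moves it to FULLSTOP_DONTSTOP
 * once the test kthreads may safely run, after which the shutdown
 * notifier below moves it to FULLSTOP_SHUTDOWN on a system shutdown and
 * the cleanup code restores FULLSTOP_RMMOD on module removal.
 */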
/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		pr_warn(/* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice(
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
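
/*
 * Typical use, as in the kthreads below, masks or reduces the result
 * modulo some bound, for example:
 *
 *	static DEFINE_RCU_RANDOM(rand);
 *
 *	udelay(rcu_random(&rand) & 0x3ff);	(0-1023 microsecond delay)
 *	schedule_timeout_uninterruptible(1 + rcu_random(&rand) % 10);
 */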

static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
	void (*cb_barrier)(void);
	void (*fqs)(void);
	int (*stats)(char *page);
	int irq_capable;
	int can_boost;
	char *name;
};

static struct rcu_torture_ops *cur_ops;

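/*
 * cur_ops is presumably pointed at whichever of the ops vectors below
 * has a ->name matching the torture_type module parameter; the
 * selection logic itself lives in the module-init code, which is not
 * included in this listing.
 */
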
/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else {
		cur_ops->deferred_free(rp);
	}
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.call		= NULL,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.call		= NULL,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.call		= NULL,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

static struct rcu_torture_ops rcu_bh_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_bh_expedited,
	.call		= NULL,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_expedited"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static void srcu_torture_call(struct rcu_head *head,
			      void (*func)(struct rcu_head *head))
{
	call_srcu(&srcu_ctl, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%lu,%lu)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

static struct rcu_torture_ops srcu_sync_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.call		= NULL,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_sync"
};

static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock_raw(&srcu_ctl);
}

static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock_raw(&srcu_ctl, idx);
}

static struct rcu_torture_ops srcu_raw_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock_raw,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock_raw,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.call		= NULL,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_raw"
};

static struct rcu_torture_ops srcu_raw_sync_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock_raw,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock_raw,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.call		= NULL,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_raw_sync"
};

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.call		= NULL,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched_expedited"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_uninterruptible(1);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	rcutorture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    rcu_random(&rand) % (nfakewriters * 8) == 0)
			cur_ops->cb_barrier();
		else
			cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

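	/*
	 * Several readers can detect an over-long pipe at the same time;
	 * the cheap atomic_read() filters the common already-dumped case,
	 * and the atomic_xchg() guarantees that exactly one caller gets
	 * to dump the trace buffer.
	 */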
1054        if (atomic_read(&beenhere))
1055                return;
1056        if (atomic_xchg(&beenhere, 1) != 0)
1057                return;
1058        do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
1059        ftrace_dump(DUMP_ALL);
1060}
1061
1062/*
1063 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1064 * incrementing the corresponding element of the pipeline array.  The
1065 * counter in the element should never be greater than 1, otherwise, the
1066 * RCU implementation is broken.
1067 */
1068static void rcu_torture_timer(unsigned long unused)
1069{
1070        int idx;
1071        int completed;
1072        static DEFINE_RCU_RANDOM(rand);
1073        static DEFINE_SPINLOCK(rand_lock);
1074        struct rcu_torture *p;
1075        int pipe_count;
1076
1077        idx = cur_ops->readlock();
1078        completed = cur_ops->completed();
1079        p = rcu_dereference_check(rcu_torture_current,
1080                                  rcu_read_lock_bh_held() ||
1081                                  rcu_read_lock_sched_held() ||
1082                                  srcu_read_lock_held(&srcu_ctl));
1083        if (p == NULL) {
1084                /* Leave because rcu_torture_writer is not yet underway */
1085                cur_ops->readunlock(idx);
1086                return;
1087        }
1088        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
1089        if (p->rtort_mbtest == 0)
1090                atomic_inc(&n_rcu_torture_mberror);
1091        spin_lock(&rand_lock);
1092        cur_ops->read_delay(&rand);
1093        n_rcu_torture_timers++;
1094        spin_unlock(&rand_lock);
1095        preempt_disable();
1096        pipe_count = p->rtort_pipe_count;
1097        if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1098                /* Should not happen, but... */
1099                pipe_count = RCU_TORTURE_PIPE_LEN;
1100        }
1101        if (pipe_count > 1)
1102                rcutorture_trace_dump();
1103        __this_cpu_inc(rcu_torture_count[pipe_count]);
1104        completed = cur_ops->completed() - completed;
1105        if (completed > RCU_TORTURE_PIPE_LEN) {
1106                /* Should not happen, but... */
1107                completed = RCU_TORTURE_PIPE_LEN;
1108        }
1109        __this_cpu_inc(rcu_torture_batch[completed]);
1110        preempt_enable();
1111        cur_ops->readunlock(idx);
1112}
1113
1114/*
1115 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1116 * incrementing the corresponding element of the pipeline array.  The
1117 * counter in the element should never be greater than 1, otherwise, the
1118 * RCU implementation is broken.
1119 */
1120static int
1121rcu_torture_reader(void *arg)
1122{
1123        int completed;
1124        int idx;
1125        DEFINE_RCU_RANDOM(rand);
1126        struct rcu_torture *p;
1127        int pipe_count;
1128        struct timer_list t;
1129
1130        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
1131        set_user_nice(current, 19);
1132        if (irqreader && cur_ops->irq_capable)
1133                setup_timer_on_stack(&t, rcu_torture_timer, 0);
1134
1135        do {
1136                if (irqreader && cur_ops->irq_capable) {
1137                        if (!timer_pending(&t))
1138                                mod_timer(&t, jiffies + 1);
1139                }
1140                idx = cur_ops->readlock();
1141                completed = cur_ops->completed();
1142                p = rcu_dereference_check(rcu_torture_current,
1143                                          rcu_read_lock_bh_held() ||
1144                                          rcu_read_lock_sched_held() ||
1145                                          srcu_read_lock_held(&srcu_ctl));
1146                if (p == NULL) {
1147                        /* Wait for rcu_torture_writer to get underway */
1148                        cur_ops->readunlock(idx);
1149                        schedule_timeout_interruptible(HZ);
1150                        continue;
1151                }
1152                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
1153                if (p->rtort_mbtest == 0)
1154                        atomic_inc(&n_rcu_torture_mberror);
1155                cur_ops->read_delay(&rand);
1156                preempt_disable();
1157                pipe_count = p->rtort_pipe_count;
1158                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1159                        /* Should not happen, but... */
1160                        pipe_count = RCU_TORTURE_PIPE_LEN;
1161                }
1162                if (pipe_count > 1)
1163                        rcutorture_trace_dump();
1164                __this_cpu_inc(rcu_torture_count[pipe_count]);
1165                completed = cur_ops->completed() - completed;
1166                if (completed > RCU_TORTURE_PIPE_LEN) {
1167                        /* Should not happen, but... */
1168                        completed = RCU_TORTURE_PIPE_LEN;
1169                }
1170                __this_cpu_inc(rcu_torture_batch[completed]);
1171                preempt_enable();
1172                cur_ops->readunlock(idx);
1173                schedule();
1174                rcu_stutter_wait("rcu_torture_reader");
1175        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
1176        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
1177        rcutorture_shutdown_absorb("rcu_torture_reader");
1178        if (irqreader && cur_ops->irq_capable)
1179                del_timer_sync(&t);
1180        while (!kthread_should_stop())
1181                schedule_timeout_uninterruptible(1);
1182        return 0;
1183}
1184
1185/*
1186 * Create an RCU-torture statistics message in the specified buffer.
1187 */
1188static int
1189rcu_torture_printk(char *page)
1190{
1191        int cnt = 0;
1192        int cpu;
1193        int i;
1194        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1195        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1196
1197        for_each_possible_cpu(cpu) {
1198                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1199                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1200                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1201                }
1202        }
1203        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1204                if (pipesummary[i] != 0)
1205                        break;
1206        }
1207        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
1208        cnt += sprintf(&page[cnt],
1209                       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1210                       rcu_torture_current,
1211                       rcu_torture_current_version,
1212                       list_empty(&rcu_torture_freelist),
1213                       atomic_read(&n_rcu_torture_alloc),
1214                       atomic_read(&n_rcu_torture_alloc_fail),
1215                       atomic_read(&n_rcu_torture_free));
1216        cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
1217                       atomic_read(&n_rcu_torture_mberror),
1218                       n_rcu_torture_boost_ktrerror,
1219                       n_rcu_torture_boost_rterror);
1220        cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
1221                       n_rcu_torture_boost_failure,
1222                       n_rcu_torture_boosts,
1223                       n_rcu_torture_timers);
1224        cnt += sprintf(&page[cnt],
1225                       "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
1226                       n_online_successes, n_online_attempts,
1227                       n_offline_successes, n_offline_attempts,
1228                       min_online, max_online,
1229                       min_offline, max_offline,
1230                       sum_online, sum_offline, HZ);
1231        cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld",
1232                       n_barrier_successes,
1233                       n_barrier_attempts,
1234                       n_rcu_torture_barrier_error);
1235        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
1236        if (atomic_read(&n_rcu_torture_mberror) != 0 ||
1237            n_rcu_torture_barrier_error != 0 ||
1238            n_rcu_torture_boost_ktrerror != 0 ||
1239            n_rcu_torture_boost_rterror != 0 ||
1240            n_rcu_torture_boost_failure != 0 ||
1241            i > 1) {
1242                cnt += sprintf(&page[cnt], "!!! ");
1243                atomic_inc(&n_rcu_torture_error);
1244                WARN_ON_ONCE(1);
1245        }
1246        cnt += sprintf(&page[cnt], "Reader Pipe: ");
1247        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1248                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
1249        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
1250        cnt += sprintf(&page[cnt], "Reader Batch: ");
1251        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1252                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
1253        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
1254        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
1255        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1256                cnt += sprintf(&page[cnt], " %d",
1257                               atomic_read(&rcu_torture_wcount[i]));
1258        }
1259        cnt += sprintf(&page[cnt], "\n");
1260        if (cur_ops->stats)
1261                cnt += cur_ops->stats(&page[cnt]);
1262        return cnt;
1263}
1264
1265/*
1266 * Print torture statistics.  Caller must ensure that there is only
1267 * one call to this function at a given time!!!  This is normally
1268 * accomplished by relying on the module system to only have one copy
1269 * of the module loaded, and then by giving the rcu_torture_stats
1270 * kthread full control (or the init/cleanup functions when rcu_torture_stats
1271 * thread is not running).
1272 */
1273static void
1274rcu_torture_stats_print(void)
1275{
1276        int cnt;
1277
1278        cnt = rcu_torture_printk(printk_buf);
1279        pr_alert("%s", printk_buf);
1280}
1281
1282/*
1283 * Periodically prints torture statistics, if periodic statistics printing
1284 * was specified via the stat_interval module parameter.
1285 *
1286 * No need to worry about fullstop here, since this one doesn't reference
1287 * volatile state or register callbacks.
1288 */
1289static int
1290rcu_torture_stats(void *arg)
1291{
1292        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
1293        do {
1294                schedule_timeout_interruptible(stat_interval * HZ);
1295                rcu_torture_stats_print();
1296                rcutorture_shutdown_absorb("rcu_torture_stats");
1297        } while (!kthread_should_stop());
1298        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
1299        return 0;
1300}
1301
1302static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */
1303
1304/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
1305 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
1306 */
1307static void rcu_torture_shuffle_tasks(void)
1308{
1309        int i;
1310
1311        cpumask_setall(shuffle_tmp_mask);
1312        get_online_cpus();
1313
1314        /* No point in shuffling if there is only one online CPU (ex: UP) */
1315        if (num_online_cpus() == 1) {
1316                put_online_cpus();
1317                return;
1318        }
1319
1320        if (rcu_idle_cpu != -1)
1321                cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
1322
1323        set_cpus_allowed_ptr(current, shuffle_tmp_mask);
1324
1325        if (reader_tasks) {
1326                for (i = 0; i < nrealreaders; i++)
1327                        if (reader_tasks[i])
1328                                set_cpus_allowed_ptr(reader_tasks[i],
1329                                                     shuffle_tmp_mask);
1330        }
1331
1332        if (fakewriter_tasks) {
1333                for (i = 0; i < nfakewriters; i++)
1334                        if (fakewriter_tasks[i])
1335                                set_cpus_allowed_ptr(fakewriter_tasks[i],
1336                                                     shuffle_tmp_mask);
1337        }
1338
1339        if (writer_task)
1340                set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
1341
1342        if (stats_task)
1343                set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
1344
1345        if (rcu_idle_cpu == -1)
1346                rcu_idle_cpu = num_online_cpus() - 1;
1347        else
1348                rcu_idle_cpu--;
1349
1350        put_online_cpus();
1351}
1352
1353/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
1354 * system to become idle at a time and cut off its timer ticks. This is meant
1355 * to test the support for such tickless idle CPU in RCU.
1356 */
1357static int
1358rcu_torture_shuffle(void *arg)
1359{
1360        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
1361        do {
1362                schedule_timeout_interruptible(shuffle_interval * HZ);
1363                rcu_torture_shuffle_tasks();
1364                rcutorture_shutdown_absorb("rcu_torture_shuffle");
1365        } while (!kthread_should_stop());
1366        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
1367        return 0;
1368}
1369
1370/* Cause the rcutorture test to "stutter", starting and stopping all
1371 * threads periodically.
1372 */
1373static int
1374rcu_torture_stutter(void *arg)
1375{
1376        VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
1377        do {
1378                schedule_timeout_interruptible(stutter * HZ);
1379                stutter_pause_test = 1;
1380                if (!kthread_should_stop())
1381                        schedule_timeout_interruptible(stutter * HZ);
1382                stutter_pause_test = 0;
1383                rcutorture_shutdown_absorb("rcu_torture_stutter");
1384        } while (!kthread_should_stop());
1385        VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
1386        return 0;
1387}
1388
1389static inline void
1390rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
1391{
1392        pr_alert("%s" TORTURE_FLAG
1393                 "--- %s: nreaders=%d nfakewriters=%d "
1394                 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1395                 "shuffle_interval=%d stutter=%d irqreader=%d "
1396                 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1397                 "test_boost=%d/%d test_boost_interval=%d "
1398                 "test_boost_duration=%d shutdown_secs=%d "
1399                 "onoff_interval=%d onoff_holdoff=%d\n",
1400                 torture_type, tag, nrealreaders, nfakewriters,
1401                 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1402                 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1403                 test_boost, cur_ops->can_boost,
1404                 test_boost_interval, test_boost_duration, shutdown_secs,
1405                 onoff_interval, onoff_holdoff);
1406}
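
/*
 * Example of the resulting console line (illustrative values only, and
 * assuming torture_type="rcu" with TORTURE_FLAG expanding to "-torture:"):
 *
 *	rcu-torture:--- Start of test: nreaders=2 nfakewriters=4
 *	stat_interval=60 verbose=0 test_no_idle_hz=1 shuffle_interval=3 ...
 */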
1407
1408static struct notifier_block rcutorture_shutdown_nb = {
1409        .notifier_call = rcutorture_shutdown_notify,
1410};
1411
1412static void rcutorture_booster_cleanup(int cpu)
1413{
1414        struct task_struct *t;
1415
1416        if (boost_tasks[cpu] == NULL)
1417                return;
1418        mutex_lock(&boost_mutex);
1419        VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
1420        t = boost_tasks[cpu];
1421        boost_tasks[cpu] = NULL;
1422        mutex_unlock(&boost_mutex);
1423
1424        /* This must be outside of the mutex, otherwise deadlock! */
1425        kthread_stop(t);
1427}
1428
1429static int rcutorture_booster_init(int cpu)
1430{
1431        int retval;
1432
1433        if (boost_tasks[cpu] != NULL)
1434                return 0;  /* Already created, nothing more to do. */
1435
1436        /* Don't allow time recalculation while creating a new task. */
1437        mutex_lock(&boost_mutex);
1438        VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
1439        boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1440                                                  cpu_to_node(cpu),
1441                                                  "rcu_torture_boost");
1442        if (IS_ERR(boost_tasks[cpu])) {
1443                retval = PTR_ERR(boost_tasks[cpu]);
1444                VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
1445                n_rcu_torture_boost_ktrerror++;
1446                boost_tasks[cpu] = NULL;
1447                mutex_unlock(&boost_mutex);
1448                return retval;
1449        }
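        /* Bind before the first wakeup so the kthread starts on the intended CPU. */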
1450        kthread_bind(boost_tasks[cpu], cpu);
1451        wake_up_process(boost_tasks[cpu]);
1452        mutex_unlock(&boost_mutex);
1453        return 0;
1454}
1455
1456/*
1457 * Cause the rcutorture test to shut down the system after the test has
1458 * run for the time specified by the shutdown_secs module parameter.
1459 */
1460static int
1461rcu_torture_shutdown(void *arg)
1462{
1463        long delta;
1464        unsigned long jiffies_snap;
1465
1466        VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
1467        jiffies_snap = ACCESS_ONCE(jiffies);
1468        while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
1469               !kthread_should_stop()) {
1470                delta = shutdown_time - jiffies_snap;
1471                if (verbose)
1472                        pr_alert("%s" TORTURE_FLAG
1473                                 "rcu_torture_shutdown task: %lu jiffies remaining\n",
1474                                 torture_type, delta);
1475                schedule_timeout_interruptible(delta);
1476                jiffies_snap = ACCESS_ONCE(jiffies);
1477        }
1478        if (kthread_should_stop()) {
1479                VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
1480                return 0;
1481        }
1482
1483        /* OK, shut down the system. */
1484
1485        VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
1486        shutdown_task = NULL;   /* Avoid self-kill deadlock. */
1487        rcu_torture_cleanup();  /* Get the success/failure message. */
1488        kernel_power_off();     /* Shut down the system. */
1489        return 0;
1490}
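
/*
 * Illustrative usage (assumed, not from the original source): automated
 * test rigs can ask the machine to power itself off after half an hour:
 *
 *	modprobe rcutorture shutdown_secs=1800
 *
 * Note that ULONG_CMP_LT() is a wraparound-safe unsigned comparison
 * (analogous to time_before()), so the loop above remains correct even
 * if the jiffies counter wraps during a long run.
 */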
1491
1492#ifdef CONFIG_HOTPLUG_CPU
1493
1494/*
1495 * Execute random CPU-hotplug operations at the interval specified
1496 * by the onoff_interval module parameter.
1497 */
1498static int __cpuinit
1499rcu_torture_onoff(void *arg)
1500{
1501        int cpu;
1502        unsigned long delta;
1503        int maxcpu = -1;
1504        DEFINE_RCU_RANDOM(rand);
1505        unsigned long starttime;
1506
1507        VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
1508        for_each_online_cpu(cpu)
1509                maxcpu = cpu;
1510        WARN_ON(maxcpu < 0);
1511        if (onoff_holdoff > 0) {
1512                VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
1513                schedule_timeout_interruptible(onoff_holdoff * HZ);
1514                VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
1515        }
1516        while (!kthread_should_stop()) {
1517                cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
1518                if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
1519                        if (verbose)
1520                                pr_alert("%s" TORTURE_FLAG
1521                                         "rcu_torture_onoff task: offlining %d\n",
1522                                         torture_type, cpu);
1523                        starttime = jiffies;
1524                        n_offline_attempts++;
1525                        if (cpu_down(cpu) == 0) {
1526                                if (verbose)
1527                                        pr_alert("%s" TORTURE_FLAG
1528                                                 "rcu_torture_onoff task: offlined %d\n",
1529                                                 torture_type, cpu);
1530                                n_offline_successes++;
1531                                delta = jiffies - starttime;
1532                                sum_offline += delta;
1533                                if (min_offline < 0) {
1534                                        min_offline = delta;
1535                                        max_offline = delta;
1536                                }
1537                                if (min_offline > delta)
1538                                        min_offline = delta;
1539                                if (max_offline < delta)
1540                                        max_offline = delta;
1541                        }
1542                } else if (cpu_is_hotpluggable(cpu)) {
1543                        if (verbose)
1544                                pr_alert("%s" TORTURE_FLAG
1545                                         "rcu_torture_onoff task: onlining %d\n",
1546                                         torture_type, cpu);
1547                        starttime = jiffies;
1548                        n_online_attempts++;
1549                        if (cpu_up(cpu) == 0) {
1550                                if (verbose)
1551                                        pr_alert("%s" TORTURE_FLAG
1552                                                 "rcu_torture_onoff task: onlined %d\n",
1553                                                 torture_type, cpu);
1554                                n_online_successes++;
1555                                delta = jiffies - starttime;
1556                                sum_online += delta;
1557                                if (min_online < 0) {
1558                                        min_online = delta;
1559                                        max_online = delta;
1560                                }
1561                                if (min_online > delta)
1562                                        min_online = delta;
1563                                if (max_online < delta)
1564                                        max_online = delta;
1565                        }
1566                }
1567                schedule_timeout_interruptible(onoff_interval * HZ);
1568        }
1569        VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
1570        return 0;
1571}
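
/*
 * Illustrative usage (assumed): to stress CPU hotplug every three seconds
 * after a 30-second holdoff:
 *
 *	modprobe rcutorture onoff_interval=3 onoff_holdoff=30
 *
 * Each pass picks a random CPU: online hotpluggable CPUs are taken down,
 * offline ones are brought back up, and per-operation latencies feed the
 * sum/min/max statistics above.
 */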
1572
1573static int __cpuinit
1574rcu_torture_onoff_init(void)
1575{
1576        int ret;
1577
1578        if (onoff_interval <= 0)
1579                return 0;
1580        onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
1581        if (IS_ERR(onoff_task)) {
1582                ret = PTR_ERR(onoff_task);
1583                onoff_task = NULL;
1584                return ret;
1585        }
1586        return 0;
1587}
1588
1589static void rcu_torture_onoff_cleanup(void)
1590{
1591        if (onoff_task == NULL)
1592                return;
1593        VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
1594        kthread_stop(onoff_task);
1595        onoff_task = NULL;
1596}
1597
1598#else /* #ifdef CONFIG_HOTPLUG_CPU */
1599
1600static int
1601rcu_torture_onoff_init(void)
1602{
1603        return 0;
1604}
1605
1606static void rcu_torture_onoff_cleanup(void)
1607{
1608}
1609
1610#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1611
1612/*
1613 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1614 * induces a CPU stall for the time specified by stall_cpu.
1615 */
1616static int __cpuinit rcu_torture_stall(void *args)
1617{
1618        unsigned long stop_at;
1619
1620        VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
1621        if (stall_cpu_holdoff > 0) {
1622                VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
1623                schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1624                VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
1625        }
1626        if (!kthread_should_stop()) {
1627                stop_at = get_seconds() + stall_cpu;
1628                /* An RCU CPU stall is expected behavior in the following code. */
1629                pr_alert("rcu_torture_stall start.\n");
1630                rcu_read_lock();
1631                preempt_disable();
1632                while (ULONG_CMP_LT(get_seconds(), stop_at))
1633                        continue;  /* Induce RCU CPU stall warning. */
1634                preempt_enable();
1635                rcu_read_unlock();
1636                pr_alert("rcu_torture_stall end.\n");
1637        }
1638        rcutorture_shutdown_absorb("rcu_torture_stall");
1639        while (!kthread_should_stop())
1640                schedule_timeout_interruptible(10 * HZ);
1641        return 0;
1642}
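
/*
 * Illustrative usage (assumed): to provoke an RCU CPU stall warning for
 * self-test purposes, stall for 22 seconds after the default 10-second
 * holdoff:
 *
 *	modprobe rcutorture stall_cpu=22
 *
 * The spin runs inside rcu_read_lock() with preemption disabled, so the
 * RCU grace period cannot end until the loop above finishes.
 */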
1643
1644/* Spawn CPU-stall kthread, if stall_cpu specified. */
1645static int __init rcu_torture_stall_init(void)
1646{
1647        int ret;
1648
1649        if (stall_cpu <= 0)
1650                return 0;
1651        stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
1652        if (IS_ERR(stall_task)) {
1653                ret = PTR_ERR(stall_task);
1654                stall_task = NULL;
1655                return ret;
1656        }
1657        return 0;
1658}
1659
1660/* Clean up after the CPU-stall kthread, if one was spawned. */
1661static void rcu_torture_stall_cleanup(void)
1662{
1663        if (stall_task == NULL)
1664                return;
1665        VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall task");
1666        kthread_stop(stall_task);
1667        stall_task = NULL;
1668}
1669
1670/* Callback function for RCU barrier testing. */
1671void rcu_torture_barrier_cbf(struct rcu_head *rcu)
1672{
1673        atomic_inc(&barrier_cbs_invoked);
1674}
1675
1676/* kthread function to register callbacks used to test RCU barriers. */
1677static int rcu_torture_barrier_cbs(void *arg)
1678{
1679        long myid = (long)arg;
1680        bool lastphase = false;
1681        struct rcu_head rcu;
1682
1683        init_rcu_head_on_stack(&rcu);
1684        VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started");
1685        set_user_nice(current, 19);
1686        do {
1687                wait_event(barrier_cbs_wq[myid],
1688                           barrier_phase != lastphase ||
1689                           kthread_should_stop() ||
1690                           fullstop != FULLSTOP_DONTSTOP);
1691                lastphase = barrier_phase;
1692                smp_mb(); /* ensure barrier_phase load before ->call(). */
1693                if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
1694                        break;
1695                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
1696                if (atomic_dec_and_test(&barrier_cbs_count))
1697                        wake_up(&barrier_wq);
1698        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
1699        VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping");
1700        rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
1701        while (!kthread_should_stop())
1702                schedule_timeout_interruptible(1);
1703        cur_ops->cb_barrier();
1704        destroy_rcu_head_on_stack(&rcu);
1705        return 0;
1706}
1707
1708/* kthread function to drive and coordinate RCU barrier testing. */
1709static int rcu_torture_barrier(void *arg)
1710{
1711        int i;
1712
1713        VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting");
1714        do {
1715                atomic_set(&barrier_cbs_invoked, 0);
1716                atomic_set(&barrier_cbs_count, n_barrier_cbs);
1717                smp_mb(); /* Ensure barrier_phase after prior assignments. */
1718                barrier_phase = !barrier_phase;
1719                for (i = 0; i < n_barrier_cbs; i++)
1720                        wake_up(&barrier_cbs_wq[i]);
1721                wait_event(barrier_wq,
1722                           atomic_read(&barrier_cbs_count) == 0 ||
1723                           kthread_should_stop() ||
1724                           fullstop != FULLSTOP_DONTSTOP);
1725                if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
1726                        break;
1727                n_barrier_attempts++;
1728                cur_ops->cb_barrier();
1729                if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
1730                        n_rcu_torture_barrier_error++;
1731                        WARN_ON_ONCE(1);
1732                } else
1733                        n_barrier_successes++;
1734                schedule_timeout_interruptible(HZ / 10);
1735        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
1736        VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
1737        rcutorture_shutdown_absorb("rcu_torture_barrier");
1738        while (!kthread_should_stop())
1739                schedule_timeout_interruptible(1);
1740        return 0;
1741}
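
/*
 * Summary of the handshake above (descriptive only): each cycle flips
 * barrier_phase, releasing every rcu_torture_barrier_cbs kthread to post
 * exactly one callback; once barrier_cbs_count reaches zero, the driver
 * invokes cur_ops->cb_barrier() and then checks that all n_barrier_cbs
 * callbacks ran.  A typical invocation might be (illustrative value):
 *
 *	modprobe rcutorture n_barrier_cbs=4
 */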
1742
1743/* Initialize RCU barrier testing. */
1744static int rcu_torture_barrier_init(void)
1745{
1746        int i;
1747        int ret;
1748
1749        if (n_barrier_cbs == 0)
1750                return 0;
1751        if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
1752                pr_alert("%s" TORTURE_FLAG
1753                         " Call or barrier ops missing for %s,\n",
1754                         torture_type, cur_ops->name);
1755                pr_alert("%s" TORTURE_FLAG
1756                         " RCU barrier testing omitted from run.\n",
1757                         torture_type);
1758                return 0;
1759        }
1760        atomic_set(&barrier_cbs_count, 0);
1761        atomic_set(&barrier_cbs_invoked, 0);
1762        barrier_cbs_tasks =
1763                kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
1764                        GFP_KERNEL);
1765        barrier_cbs_wq =
1766                kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
1767                        GFP_KERNEL);
1768        if (barrier_cbs_tasks == NULL || barrier_cbs_wq == NULL)
1769                return -ENOMEM;
1770        for (i = 0; i < n_barrier_cbs; i++) {
1771                init_waitqueue_head(&barrier_cbs_wq[i]);
1772                barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
1773                                                   (void *)(long)i,
1774                                                   "rcu_torture_barrier_cbs");
1775                if (IS_ERR(barrier_cbs_tasks[i])) {
1776                        ret = PTR_ERR(barrier_cbs_tasks[i]);
1777                        VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
1778                        barrier_cbs_tasks[i] = NULL;
1779                        return ret;
1780                }
1781        }
1782        barrier_task = kthread_run(rcu_torture_barrier, NULL,
1783                                   "rcu_torture_barrier");
1784        if (IS_ERR(barrier_task)) {
1785                ret = PTR_ERR(barrier_task);
1786                VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier");
1787                barrier_task = NULL;
1788        }
1789        return 0;
1790}
1791
1792/* Clean up after RCU barrier testing. */
1793static void rcu_torture_barrier_cleanup(void)
1794{
1795        int i;
1796
1797        if (barrier_task != NULL) {
1798                VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task");
1799                kthread_stop(barrier_task);
1800                barrier_task = NULL;
1801        }
1802        if (barrier_cbs_tasks != NULL) {
1803                for (i = 0; i < n_barrier_cbs; i++) {
1804                        if (barrier_cbs_tasks[i] != NULL) {
1805                                VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task");
1806                                kthread_stop(barrier_cbs_tasks[i]);
1807                                barrier_cbs_tasks[i] = NULL;
1808                        }
1809                }
1810                kfree(barrier_cbs_tasks);
1811                barrier_cbs_tasks = NULL;
1812        }
1813        if (barrier_cbs_wq != NULL) {
1814                kfree(barrier_cbs_wq);
1815                barrier_cbs_wq = NULL;
1816        }
1817}
1818
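/* CPU-hotplug notifications: create or stop the per-CPU boost kthread. */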
1819static int rcutorture_cpu_notify(struct notifier_block *self,
1820                                 unsigned long action, void *hcpu)
1821{
1822        long cpu = (long)hcpu;
1823
1824        switch (action) {
1825        case CPU_ONLINE:
1826        case CPU_DOWN_FAILED:
1827                (void)rcutorture_booster_init(cpu);
1828                break;
1829        case CPU_DOWN_PREPARE:
1830                rcutorture_booster_cleanup(cpu);
1831                break;
1832        default:
1833                break;
1834        }
1835        return NOTIFY_OK;
1836}
1837
1838static struct notifier_block rcutorture_cpu_nb = {
1839        .notifier_call = rcutorture_cpu_notify,
1840};
1841
1842static void
1843rcu_torture_cleanup(void)
1844{
1845        int i;
1846
1847        mutex_lock(&fullstop_mutex);
1848        rcutorture_record_test_transition();
1849        if (fullstop == FULLSTOP_SHUTDOWN) {
1850                pr_warn(/* but going down anyway, so... */
1851                       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
1852                mutex_unlock(&fullstop_mutex);
1853                schedule_timeout_uninterruptible(10);
1854                if (cur_ops->cb_barrier != NULL)
1855                        cur_ops->cb_barrier();
1856                return;
1857        }
1858        fullstop = FULLSTOP_RMMOD;
1859        mutex_unlock(&fullstop_mutex);
1860        unregister_reboot_notifier(&rcutorture_shutdown_nb);
1861        rcu_torture_barrier_cleanup();
1862        rcu_torture_stall_cleanup();
1863        if (stutter_task) {
1864                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
1865                kthread_stop(stutter_task);
1866        }
1867        stutter_task = NULL;
1868        if (shuffler_task) {
1869                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
1870                kthread_stop(shuffler_task);
1871                free_cpumask_var(shuffle_tmp_mask);
1872        }
1873        shuffler_task = NULL;
1874
1875        if (writer_task) {
1876                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
1877                kthread_stop(writer_task);
1878        }
1879        writer_task = NULL;
1880
1881        if (reader_tasks) {
1882                for (i = 0; i < nrealreaders; i++) {
1883                        if (reader_tasks[i]) {
1884                                VERBOSE_PRINTK_STRING(
1885                                        "Stopping rcu_torture_reader task");
1886                                kthread_stop(reader_tasks[i]);
1887                        }
1888                        reader_tasks[i] = NULL;
1889                }
1890                kfree(reader_tasks);
1891                reader_tasks = NULL;
1892        }
1893        rcu_torture_current = NULL;
1894
1895        if (fakewriter_tasks) {
1896                for (i = 0; i < nfakewriters; i++) {
1897                        if (fakewriter_tasks[i]) {
1898                                VERBOSE_PRINTK_STRING(
1899                                        "Stopping rcu_torture_fakewriter task");
1900                                kthread_stop(fakewriter_tasks[i]);
1901                        }
1902                        fakewriter_tasks[i] = NULL;
1903                }
1904                kfree(fakewriter_tasks);
1905                fakewriter_tasks = NULL;
1906        }
1907
1908        if (stats_task) {
1909                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
1910                kthread_stop(stats_task);
1911        }
1912        stats_task = NULL;
1913
1914        if (fqs_task) {
1915                VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
1916                kthread_stop(fqs_task);
1917        }
1918        fqs_task = NULL;
1919        if ((test_boost == 1 && cur_ops->can_boost) ||
1920            test_boost == 2) {
1921                unregister_cpu_notifier(&rcutorture_cpu_nb);
1922                for_each_possible_cpu(i)
1923                        rcutorture_booster_cleanup(i);
1924        }
1925        if (shutdown_task != NULL) {
1926                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
1927                kthread_stop(shutdown_task);
1928        }
1929        shutdown_task = NULL;
1930        rcu_torture_onoff_cleanup();
1931
1932        /* Wait for all RCU callbacks to fire.  */
1933
1934        if (cur_ops->cb_barrier != NULL)
1935                cur_ops->cb_barrier();
1936
1937        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
1938
1939        if (cur_ops->cleanup)
1940                cur_ops->cleanup();
1941        if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
1942                rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
1943        else if (n_online_successes != n_online_attempts ||
1944                 n_offline_successes != n_offline_attempts)
1945                rcu_torture_print_module_parms(cur_ops,
1946                                               "End of test: RCU_HOTPLUG");
1947        else
1948                rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
1949}
1950
1951static int __init
1952rcu_torture_init(void)
1953{
1954        int i;
1955        int cpu;
1956        int firsterr = 0;
1957        int retval;
1958        static struct rcu_torture_ops *torture_ops[] =
1959                { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
1960                  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
1961                  &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
1962                  &srcu_raw_ops, &srcu_raw_sync_ops,
1963                  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
1964
1965        mutex_lock(&fullstop_mutex);
1966
1967        /* Process args and tell the world that the torturer is on the job. */
1968        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1969                cur_ops = torture_ops[i];
1970                if (strcmp(torture_type, cur_ops->name) == 0)
1971                        break;
1972        }
1973        if (i == ARRAY_SIZE(torture_ops)) {
1974                pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
1975                         torture_type);
1976                pr_alert("rcu-torture types:");
1977                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1978                        pr_alert(" %s", torture_ops[i]->name);
1979                pr_alert("\n");
1980                mutex_unlock(&fullstop_mutex);
1981                return -EINVAL;
1982        }
1983        if (cur_ops->fqs == NULL && fqs_duration != 0) {
1984                pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
1985                fqs_duration = 0;
1986        }
1987        if (cur_ops->init)
1988                cur_ops->init(); /* no "goto unwind" prior to this point!!! */
1989
1990        if (nreaders >= 0)
1991                nrealreaders = nreaders;
1992        else
1993                nrealreaders = 2 * num_online_cpus();
1994        rcu_torture_print_module_parms(cur_ops, "Start of test");
1995        fullstop = FULLSTOP_DONTSTOP;
1996
1997        /* Set up the freelist. */
1998
1999        INIT_LIST_HEAD(&rcu_torture_freelist);
2000        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
2001                rcu_tortures[i].rtort_mbtest = 0;
2002                list_add_tail(&rcu_tortures[i].rtort_free,
2003                              &rcu_torture_freelist);
2004        }
2005
2006        /* Initialize the statistics so that each run gets its own numbers. */
2007
2008        rcu_torture_current = NULL;
2009        rcu_torture_current_version = 0;
2010        atomic_set(&n_rcu_torture_alloc, 0);
2011        atomic_set(&n_rcu_torture_alloc_fail, 0);
2012        atomic_set(&n_rcu_torture_free, 0);
2013        atomic_set(&n_rcu_torture_mberror, 0);
2014        atomic_set(&n_rcu_torture_error, 0);
2015        n_rcu_torture_barrier_error = 0;
2016        n_rcu_torture_boost_ktrerror = 0;
2017        n_rcu_torture_boost_rterror = 0;
2018        n_rcu_torture_boost_failure = 0;
2019        n_rcu_torture_boosts = 0;
2020        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2021                atomic_set(&rcu_torture_wcount[i], 0);
2022        for_each_possible_cpu(cpu) {
2023                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2024                        per_cpu(rcu_torture_count, cpu)[i] = 0;
2025                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
2026                }
2027        }
2028
2029        /* Start up the kthreads. */
2030
2031        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
2032        writer_task = kthread_create(rcu_torture_writer, NULL,
2033                                     "rcu_torture_writer");
2034        if (IS_ERR(writer_task)) {
2035                firsterr = PTR_ERR(writer_task);
2036                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
2037                writer_task = NULL;
2038                goto unwind;
2039        }
2040        wake_up_process(writer_task);
2041        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
2042                                   GFP_KERNEL);
2043        if (fakewriter_tasks == NULL) {
2044                VERBOSE_PRINTK_ERRSTRING("out of memory");
2045                firsterr = -ENOMEM;
2046                goto unwind;
2047        }
2048        for (i = 0; i < nfakewriters; i++) {
2049                VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
2050                fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
2051                                                  "rcu_torture_fakewriter");
2052                if (IS_ERR(fakewriter_tasks[i])) {
2053                        firsterr = PTR_ERR(fakewriter_tasks[i]);
2054                        VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
2055                        fakewriter_tasks[i] = NULL;
2056                        goto unwind;
2057                }
2058        }
2059        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
2060                               GFP_KERNEL);
2061        if (reader_tasks == NULL) {
2062                VERBOSE_PRINTK_ERRSTRING("out of memory");
2063                firsterr = -ENOMEM;
2064                goto unwind;
2065        }
2066        for (i = 0; i < nrealreaders; i++) {
2067                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
2068                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
2069                                              "rcu_torture_reader");
2070                if (IS_ERR(reader_tasks[i])) {
2071                        firsterr = PTR_ERR(reader_tasks[i]);
2072                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
2073                        reader_tasks[i] = NULL;
2074                        goto unwind;
2075                }
2076        }
2077        if (stat_interval > 0) {
2078                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
2079                stats_task = kthread_run(rcu_torture_stats, NULL,
2080                                        "rcu_torture_stats");
2081                if (IS_ERR(stats_task)) {
2082                        firsterr = PTR_ERR(stats_task);
2083                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
2084                        stats_task = NULL;
2085                        goto unwind;
2086                }
2087        }
2088        if (test_no_idle_hz) {
2089                rcu_idle_cpu = num_online_cpus() - 1;
2090
2091                if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
2092                        firsterr = -ENOMEM;
2093                        VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
2094                        goto unwind;
2095                }
2096
2097                /* Create the shuffler thread */
2098                shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
2099                                          "rcu_torture_shuffle");
2100                if (IS_ERR(shuffler_task)) {
2101                        free_cpumask_var(shuffle_tmp_mask);
2102                        firsterr = PTR_ERR(shuffler_task);
2103                        VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
2104                        shuffler_task = NULL;
2105                        goto unwind;
2106                }
2107        }
2108        if (stutter < 0)
2109                stutter = 0;
2110        if (stutter) {
2111                /* Create the stutter thread */
2112                stutter_task = kthread_run(rcu_torture_stutter, NULL,
2113                                          "rcu_torture_stutter");
2114                if (IS_ERR(stutter_task)) {
2115                        firsterr = PTR_ERR(stutter_task);
2116                        VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
2117                        stutter_task = NULL;
2118                        goto unwind;
2119                }
2120        }
2121        if (fqs_duration < 0)
2122                fqs_duration = 0;
2123        if (fqs_duration) {
2124                /* Create the fqs thread */
2125                fqs_task = kthread_run(rcu_torture_fqs, NULL,
2126                                       "rcu_torture_fqs");
2127                if (IS_ERR(fqs_task)) {
2128                        firsterr = PTR_ERR(fqs_task);
2129                        VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
2130                        fqs_task = NULL;
2131                        goto unwind;
2132                }
2133        }
2134        if (test_boost_interval < 1)
2135                test_boost_interval = 1;
2136        if (test_boost_duration < 2)
2137                test_boost_duration = 2;
2138        if ((test_boost == 1 && cur_ops->can_boost) ||
2139            test_boost == 2) {
2140
2141                boost_starttime = jiffies + test_boost_interval * HZ;
2142                register_cpu_notifier(&rcutorture_cpu_nb);
2143                for_each_possible_cpu(i) {
2144                        if (cpu_is_offline(i))
2145                                continue;  /* Heuristic: CPU can go offline. */
2146                        retval = rcutorture_booster_init(i);
2147                        if (retval < 0) {
2148                                firsterr = retval;
2149                                goto unwind;
2150                        }
2151                }
2152        }
2153        if (shutdown_secs > 0) {
2154                shutdown_time = jiffies + shutdown_secs * HZ;
2155                shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
2156                                               "rcu_torture_shutdown");
2157                if (IS_ERR(shutdown_task)) {
2158                        firsterr = PTR_ERR(shutdown_task);
2159                        VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
2160                        shutdown_task = NULL;
2161                        goto unwind;
2162                }
2163                wake_up_process(shutdown_task);
2164        }
2165        i = rcu_torture_onoff_init();
2166        if (i != 0) {
2167                firsterr = i;
2168                goto unwind;
2169        }
2170        register_reboot_notifier(&rcutorture_shutdown_nb);
2171        i = rcu_torture_stall_init();
2172        if (i != 0) {
2173                firsterr = i;
2174                goto unwind;
2175        }
2176        retval = rcu_torture_barrier_init();
2177        if (retval != 0) {
2178                firsterr = retval;
2179                goto unwind;
2180        }
2181        rcutorture_record_test_transition();
2182        mutex_unlock(&fullstop_mutex);
2183        return 0;
2184
2185unwind:
2186        mutex_unlock(&fullstop_mutex);
2187        rcu_torture_cleanup();
2188        return firsterr;
2189}
2190
2191module_init(rcu_torture_init);
2192module_exit(rcu_torture_cleanup);
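
/*
 * Illustrative end-to-end usage (assumed, not from the original source):
 *
 *	modprobe rcutorture torture_type=rcu nreaders=8 stat_interval=30
 *	... let the test run for a while ...
 *	rmmod rcutorture
 *
 * Removing the module runs rcu_torture_cleanup(), which stops all of the
 * kthreads, waits for outstanding callbacks via cur_ops->cb_barrier(), and
 * prints the final "End of test: SUCCESS" or "End of test: FAILURE" line.
 */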
2193