linux/kernel/rcutorture.c
/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration = 0;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0;	/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu, "
		 "sched, and related _sync/_expedited variants; see torture_ops[])");
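/*
 * Example usage (illustrative only; parameter values are arbitrary):
 *
 *	modprobe rcutorture nreaders=8 stat_interval=30 torture_type=srcu
 *
 * This starts eight reader kthreads against the SRCU implementation and
 * prints statistics every 30 seconds.
 */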

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
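
/*
 * With the default torture_type of "rcu", the macros above emit console
 * lines such as (message text taken from uses below):
 *
 *	rcu-torture: rcu_torture_writer task started
 *	rcu-torture: !!! Failed to create writer
 */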

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current;
static long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
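
/*
 * Note: when built in with CONFIG_RCU_TORTURE_TEST_RUNNABLE,
 * rcutorture_runnable is typically wired up to a sysctl
 * (kernel.rcutorture_runnable) so the test can be paused and resumed at
 * run time; rcu_stutter_wait() below polls this flag.
 */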

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
				/*  of kthreads. */

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
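
/*
 * Illustrative use of the generator above (a sketch mirroring the
 * patterns used by the kthreads below):
 *
 *	DEFINE_RCU_RANDOM(rand);
 *
 *	udelay(rcu_random(&rand) & 0x3ff);	random 0-1023 us delay
 *	if (!(rcu_random(&rand) % n))		true once per n calls, on average
 */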

static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);		/* Per-flavor setup. */
	void (*cleanup)(void);		/* Per-flavor teardown. */
	int (*readlock)(void);		/* Enter a read-side critical section. */
	void (*read_delay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);	/* Exit the read-side critical section. */
	int (*completed)(void);		/* # grace-period batches completed. */
	void (*deferred_free)(struct rcu_torture *p); /* Free after grace period. */
	void (*sync)(void);		/* Wait for a grace period. */
	void (*cb_barrier)(void);	/* Wait for outstanding callbacks. */
	void (*fqs)(void);		/* Force a quiescent state. */
	int (*stats)(char *page);	/* Flavor-specific statistics. */
	int irq_capable;		/* Readers may run from irq handlers. */
	char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
}
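
/*
 * Worked example of the probabilities above: with nrealreaders = 4, the
 * 50 ms delay fires on average once per 4 * 2000 * 50 = 400,000 calls
 * and the 200 us delay once per 4 * 2 * 200 = 1,600 calls, so long
 * delays remain rare enough not to stall the test as readers are added.
 */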

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu"
};

/*
 * Emulate asynchronous deferred_free() for flavors that have only a
 * synchronous grace-period primitive: wait for a grace period, queue the
 * element on rcu_torture_removed, and free any queued elements whose
 * pipeline counts have matured.
 */
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

/*
 * Wait for a bottom-half grace period: post a callback on the stack and
 * block until it is invoked.
 */
static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= rcu_bh_torture_synchronize,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= rcu_bh_torture_synchronize,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
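
/*
 * A hypothetical srcu_torture_stats() line for a two-CPU box (counter
 * values invented for illustration):
 *
 *	srcu-torture: per-CPU(idx=0): 0(0,1) 1(0,0)
 *
 * Here CPU 0 still has one reader outstanding on the current index.
 */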

static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= sched_torture_synchronize,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= sched_torture_synchronize,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched_expedited"
};

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		/* Wraparound-safe test for jiffies < fqs_resume_time. */
		while (jiffies - fqs_resume_time > LONG_MAX)
			schedule_timeout_interruptible(1);
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
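
/*
 * Burst arithmetic: each burst issues roughly fqs_duration / fqs_holdoff
 * calls to cur_ops->fqs(), one every fqs_holdoff microseconds, with
 * bursts separated by fqs_stutter seconds.  For example, fqs_duration=100
 * and fqs_holdoff=10 yields about ten fqs calls per burst.  A zero
 * holdoff would never decrement fqs_burst_remaining, which is why
 * rcu_torture_init() refuses to start this kthread without a positive
 * holdoff.
 */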

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcu_torture_current_version++;
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
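
/*
 * Pipeline example: with RCU_TORTURE_PIPE_LEN = 10, each retired element
 * has rtort_pipe_count incremented once per grace period and returns to
 * the freelist once the count reaches RCU_TORTURE_PIPE_LEN.  Readers
 * should only ever observe counts of 0 or 1; rcu_torture_printk() below
 * flags any reader sighting beyond slot 1 of the pipeline as an error.
 */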

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_held() ||
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_held() ||
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d nt: %ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_timers);
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}
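
/*
 * A hypothetical first statistics line (all field values invented, shown
 * wrapped here for readability; the kernel emits it as one line):
 *
 *	rcu-torture: rtc: ffff88003c858340 ver: 1916 tfle: 0 rta: 1929
 *	             rtaf: 0 rtf: 1919 rtmbe: 0 nt: 241419
 *
 * "!!!" markers are appended when memory-barrier errors were seen or when
 * a reader observed an element more than one grace period old.
 */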

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow rcu_idle_cpu to become idle.  As a
 * special case, rcu_idle_cpu == -1 allows the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}
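
/*
 * Rotation example with four online CPUs: rcu_idle_cpu starts at 3, so
 * successive shuffles exclude CPU 3, 2, 1, then 0 from every torture
 * task's affinity mask; the next pass (rcu_idle_cpu == -1) lets the
 * tasks run anywhere before the cycle restarts at CPU 3.
 */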

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and shut off its timer tick.  This tests
 * RCU's support for tickless idle CPUs.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval=%d stutter=%d irqreader=%d "
		"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
}

static struct notifier_block rcutorture_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;

	/* Wait for all RCU callbacks to fire.  */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
				  "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration && fqs_holdoff <= 0) {
		/* A zero holdoff would never decrement fqs_burst_remaining. */
		printk(KERN_ALERT "rcu-torture: fqs_holdoff must be positive, "
				  "fqs disabled.\n");
		fqs_duration = 0;
	}
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	register_reboot_notifier(&rcutorture_nb);
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);