linux/kernel/rcutorture.c
/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;    /* # fake writer threads */
static int stat_interval;       /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static int verbose;             /* Print more debug info. */
static int test_no_idle_hz;     /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0);
MODULE_PARM_DESC(torture_type,
                 "Type of RCU to torture (rcu, rcu_sync, rcu_bh, rcu_bh_sync, srcu, sched)");

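/*
 * All of the above are set at module load time.  For example, a
 * hypothetical invocation exercising SRCU with four readers and
 * statistics every 30 seconds (values here are illustrative only):
 *
 *      modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 */
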
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

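/*
 * With the default torture_type, PRINTK_STRING("foo") thus emits
 * "rcu-torture:foo" at KERN_ALERT priority; the ERRSTRING variant
 * additionally prefixes the message body with "!!! " so that failures
 * are easy to grep out of the log.
 */
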
static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static int fullstop = 0;        /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
        unsigned long rrs_state;
        long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
        long refresh;

        if (--rrsp->rrs_count < 0) {
                get_random_bytes(&refresh, sizeof(refresh));
                rrsp->rrs_state += refresh;
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}
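
/*
 * In other words, each call computes state = state * 39916801 + 479001701
 * and returns the result with its 16-bit half-words swapped (swahw32()),
 * presumably so that the weaker low-order LCG bits land in the high half.
 * Callers reduce the result modulo a bound, for example:
 *
 *      delay = rcu_random(&rand) % (nrealreaders * 2 * longdelay);
 */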

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readdelay)(struct rcu_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferredfree)(struct rcu_torture *p);
        void (*sync)(void);
        int (*stats)(char *page);
        char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long longdelay = 200;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
        if (!delay)
                udelay(longdelay);
}
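
/*
 * With, say, nrealreaders == 8, the modulus above is 3200, so roughly
 * one read in 3200 spins in udelay() for the full 200 microseconds
 * while the rest incur no extra delay at all.
 */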

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
        return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_torture_deferred_free,
        .sync = synchronize_rcu,
        .stats = NULL,
        .name = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        cur_ops->sync();
        list_add(&p->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                i = rp->rtort_pipe_count;
                if (i > RCU_TORTURE_PIPE_LEN)
                        i = RCU_TORTURE_PIPE_LEN;
                atomic_inc(&rcu_torture_wcount[i]);
                if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                        rp->rtort_mbtest = 0;
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}
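
/*
 * Since the synchronous variant has no callback to age elements through
 * the pipeline, it parks each removed element on rcu_torture_removed and
 * advances every parked element one pipeline stage per call (that is,
 * per grace period), freeing those that reach the end.
 */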

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = synchronize_rcu,
        .stats = NULL,
        .name = "rcu_sync"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
        return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
        struct rcu_head head;
        struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
        struct rcu_bh_torture_synchronize *rcu;

        rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
        complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
        struct rcu_bh_torture_synchronize rcu;

        init_completion(&rcu.completion);
        call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
        wait_for_completion(&rcu.completion);
}
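
/*
 * The three functions above open-code a synchronous grace-period wait
 * for rcu_bh, since no rcu_bh counterpart of synchronize_rcu() is
 * available here: queue a callback with call_rcu_bh() and block on a
 * completion that the callback signals.  The on-stack structure is safe
 * because wait_for_completion() cannot return before the callback runs.
 */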

static struct rcu_torture_ops rcu_bh_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_bh_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .stats = NULL,
        .name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .stats = NULL,
        .name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
        init_srcu_struct(&srcu_ctl);
        rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
        synchronize_srcu(&srcu_ctl);
        cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void)
{
        return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx)
{
        srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
        return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
        int cnt = 0;
        int cpu;
        int idx = srcu_ctl.completed & 0x1;

        cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
                       torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}
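
/*
 * On a two-CPU system this adds a line of roughly the following shape
 * to the statistics output (counts invented for illustration):
 *
 *      srcu-torture: per-CPU(idx=0): 0(0,1) 1(0,2)
 *
 * where each cpu(a,b) entry gives that CPU's reader counts for the
 * non-current (a) and current (b) halves of the srcu_struct counters.
 */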

static struct rcu_torture_ops srcu_ops = {
        .init = srcu_torture_init,
        .cleanup = srcu_torture_cleanup,
        .readlock = srcu_torture_read_lock,
        .readdelay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .completed = srcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .stats = srcu_torture_stats,
        .name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_torture_read_unlock(int idx)
{
        preempt_enable();
}

static int sched_torture_completed(void)
{
        return 0;
}

static void sched_torture_synchronize(void)
{
        synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = sched_torture_read_lock,
        .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock = sched_torture_read_unlock,
        .completed = sched_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .stats = NULL,
        .name = "sched"
};

static struct rcu_torture_ops *torture_ops[] =
        { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &srcu_ops,
          &sched_ops, NULL };
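
/*
 * Supporting another RCU flavor is a matter of filling in one more
 * rcu_torture_ops and adding it before the trailing NULL above.  A
 * minimal hypothetical entry (the my_* names below are illustrative,
 * not functions defined in this file) would look roughly like:
 */
#if 0
static struct rcu_torture_ops my_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = my_torture_read_lock,       /* must return an index */
        .readdelay = rcu_read_delay,            /* reusing rcu's is fine */
        .readunlock = my_torture_read_unlock,   /* gets that index back */
        .completed = my_torture_completed,      /* grace-period counter */
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = my_torture_synchronize,
        .stats = NULL,                          /* optional */
        .name = "my_rcu"                        /* matched by torture_type */
};
#endif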

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                if ((rp = rcu_torture_alloc()) == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_torture_current;
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb();
                if (old_rp != NULL) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        cur_ops->deferredfree(old_rp);
                }
                rcu_torture_current_version++;
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand) % 10);
                udelay(rcu_random(&rand) & 0x3ff);
                cur_ops->sync();
        } while (!kthread_should_stop() && !fullstop);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);

        do {
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
                p = rcu_dereference(rcu_torture_current);
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->readdelay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_count)[pipe_count];
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_batch)[completed];
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
        int cnt = 0;
        int cpu;
        int i;
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
                }
        }
        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
                if (pipesummary[i] != 0)
                        break;
        }
        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt],
                       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
                       "rtmbe: %d",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
                       atomic_read(&n_rcu_torture_free),
                       atomic_read(&n_rcu_torture_mberror));
        if (atomic_read(&n_rcu_torture_mberror) != 0)
                cnt += sprintf(&page[cnt], " !!!");
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        if (i > 1) {
                cnt += sprintf(&page[cnt], "!!! ");
                atomic_inc(&n_rcu_torture_error);
        }
        cnt += sprintf(&page[cnt], "Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                cnt += sprintf(&page[cnt], " %d",
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
        if (cur_ops->stats != NULL)
                cnt += cur_ops->stats(&page[cnt]);
        return cnt;
}
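
/*
 * Key to the abbreviated first line above: rtc is the current victim
 * pointer, ver the write-side version count, tfle whether the freelist
 * is empty, rta/rtaf/rtf the allocation, allocation-failure, and free
 * counts, and rtmbe the memory-barrier error count (any nonzero value
 * indicates a broken RCU implementation).
 */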

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
        int cnt;

        cnt = rcu_torture_printk(printk_buf);
        printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
}

static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
        cpumask_t tmp_mask = CPU_MASK_ALL;
        int i;

        lock_cpu_hotplug();

        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
                unlock_cpu_hotplug();
                return;
        }

        if (rcu_idle_cpu != -1)
                cpu_clear(rcu_idle_cpu, tmp_mask);

        set_cpus_allowed(current, tmp_mask);

        if (reader_tasks != NULL) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
                                set_cpus_allowed(reader_tasks[i], tmp_mask);
        }

        if (fakewriter_tasks != NULL) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
        }

        if (writer_task)
                set_cpus_allowed(writer_task, tmp_mask);

        if (stats_task)
                set_cpus_allowed(stats_task, tmp_mask);

        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;

        unlock_cpu_hotplug();
}
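
/*
 * The excluded CPU thus rotates downward across calls: on a four-CPU
 * system rcu_idle_cpu steps 3, 2, 1, 0, then -1 (one interval with no
 * CPU excluded), and then wraps back to 3.
 */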

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
        do {
                schedule_timeout_interruptible(shuffle_interval * HZ);
                rcu_torture_shuffle_tasks();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
        return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
        printk(KERN_ALERT "%s" TORTURE_FLAG
                "--- %s: nreaders=%d nfakewriters=%d "
                "stat_interval=%d verbose=%d test_no_idle_hz=%d "
                "shuffle_interval=%d\n",
                torture_type, tag, nrealreaders, nfakewriters,
                stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}

static void
rcu_torture_cleanup(void)
{
        int i;

        fullstop = 1;
        if (shuffler_task != NULL) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
                kthread_stop(shuffler_task);
        }
        shuffler_task = NULL;

        if (writer_task != NULL) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
                kthread_stop(writer_task);
        }
        writer_task = NULL;

        if (reader_tasks != NULL) {
                for (i = 0; i < nrealreaders; i++) {
                        if (reader_tasks[i] != NULL) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_reader task");
                                kthread_stop(reader_tasks[i]);
                        }
                        reader_tasks[i] = NULL;
                }
                kfree(reader_tasks);
                reader_tasks = NULL;
        }
        rcu_torture_current = NULL;

        if (fakewriter_tasks != NULL) {
                for (i = 0; i < nfakewriters; i++) {
                        if (fakewriter_tasks[i] != NULL) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_fakewriter task");
                                kthread_stop(fakewriter_tasks[i]);
                        }
                        fakewriter_tasks[i] = NULL;
                }
                kfree(fakewriter_tasks);
                fakewriter_tasks = NULL;
        }

        if (stats_task != NULL) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
                kthread_stop(stats_task);
        }
        stats_task = NULL;

        /* Wait for all RCU callbacks to fire.  */
        rcu_barrier();

        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();
        if (atomic_read(&n_rcu_torture_error))
                rcu_torture_print_module_parms("End of test: FAILURE");
        else
                rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int
rcu_torture_init(void)
{
        int i;
        int cpu;
        int firsterr = 0;

        /* Process args and tell the world that the torturer is on the job. */

        for (i = 0; (cur_ops = torture_ops[i]) != NULL; i++) {
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (cur_ops == NULL) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
                return -EINVAL;
        }
        if (cur_ops->init != NULL)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */

        if (nreaders >= 0)
                nrealreaders = nreaders;
        else
                nrealreaders = 2 * num_online_cpus();
        rcu_torture_print_module_parms("Start of test");
        fullstop = 0;

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }

        /* Start up the kthreads. */

        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
        writer_task = kthread_run(rcu_torture_writer, NULL,
                                  "rcu_torture_writer");
        if (IS_ERR(writer_task)) {
                firsterr = PTR_ERR(writer_task);
                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
                writer_task = NULL;
                goto unwind;
        }
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
        if (fakewriter_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nfakewriters; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
                fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
                                                  "rcu_torture_fakewriter");
                if (IS_ERR(fakewriter_tasks[i])) {
                        firsterr = PTR_ERR(fakewriter_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
                        fakewriter_tasks[i] = NULL;
                        goto unwind;
                }
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
                                              "rcu_torture_reader");
                if (IS_ERR(reader_tasks[i])) {
                        firsterr = PTR_ERR(reader_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
        }
        if (stat_interval > 0) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
                stats_task = kthread_run(rcu_torture_stats, NULL,
                                        "rcu_torture_stats");
                if (IS_ERR(stats_task)) {
                        firsterr = PTR_ERR(stats_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
                        stats_task = NULL;
                        goto unwind;
                }
        }
        if (test_no_idle_hz) {
                rcu_idle_cpu = num_online_cpus() - 1;
                /* Create the shuffler thread */
                shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
                                          "rcu_torture_shuffle");
                if (IS_ERR(shuffler_task)) {
                        firsterr = PTR_ERR(shuffler_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
                        shuffler_task = NULL;
                        goto unwind;
                }
        }
        return 0;

unwind:
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);