linux/kernel/kcsan/kcsan_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since the
 * interface with which KCSAN's reports are obtained is via the console, this is
 * the output we should verify. Each test case checks for the presence (or
 * absence) of generated reports. Relies on the 'console' tracepoint to capture
 * reports as they appear in the kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <elver@google.com>
 */

#define pr_fmt(fmt) "kcsan_test: " fmt

#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/torture.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <trace/events/printk.h>

#define KCSAN_TEST_REQUIRES(test, cond) do {                    \
        if (!(cond))                                            \
                kunit_skip((test), "Test requires: " #cond);    \
} while (0)

#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif
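/*
 * For example, with CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE=y,
 * __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) evaluates to
 * (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE), i.e. reports are expected to
 * say "read-write"; without compound instrumentation, the fallback @alt (here
 * a plain write) is expected instead.
 */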

/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/* Report as observed from console. */
static struct {
        spinlock_t lock;
        int nlines;
        char lines[3][512];
} observed = {
        .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Set up test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{
        kcsan_disable_current();

        /*
         * Require at least as long as KCSAN_REPORT_ONCE_IN_MS, to ensure at
         * least one race is reported.
         */
        end_time = jiffies + msecs_to_jiffies(CONFIG_KCSAN_REPORT_ONCE_IN_MS + 500);

        /* Signal start; release potential initialization of shared data. */
        smp_store_release(&access_kernels[0], func1);
        smp_store_release(&access_kernels[1], func2);
}
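
/*
 * A minimal sketch (not part of this excerpt) of how a torture thread could
 * consume the published kernels: the smp_load_acquire() below pairs with the
 * smp_store_release() in begin_test_checks() above. Function and argument
 * names here are hypothetical.
 */
#if 0 /* illustrative only */
static int example_access_thread(void *arg)
{
        do {
                void (*kernel)(void) = smp_load_acquire(&access_kernels[(long)arg]);

                if (kernel)
                        kernel(); /* Run the current test-case access kernel. */
        } while (!torture_must_stop());
        torture_kthread_stopping("example_access_thread");
        return 0;
}
#endif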

/* End test checking loop. */
static __no_kcsan inline bool
end_test_checks(bool stop)
{
        if (!stop && time_before(jiffies, end_time)) {
                /* Continue checking */
                might_sleep();
                return false;
        }

        kcsan_enable_current();
        return true;
}

/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{
        unsigned long flags;
        int nlines;

        /*
         * Note that KCSAN reports under a global lock, so we do not risk the
         * possibility of having multiple reports interleaved. If that were the
         * case, we'd expect tests to fail.
         */

        spin_lock_irqsave(&observed.lock, flags);
        nlines = observed.nlines;

        if (strnstr(buf, "BUG: KCSAN: ", len) && strnstr(buf, "test_", len)) {
                /*
                 * A KCSAN report related to the test.
                 *
                 * The provided @buf is not NUL-terminated; copy no more than
                 * @len bytes and let strscpy() add the missing NUL-terminator.
                 */
                strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
                nlines = 1;
        } else if ((nlines == 1 || nlines == 2) && strnstr(buf, "bytes by", len)) {
                strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));

                if (strnstr(buf, "race at unknown origin", len)) {
                        if (WARN_ON(nlines != 2))
                                goto out;

                        /* No second line of interest. */
                        strcpy(observed.lines[nlines++], "<none>");
                }
        }

out:
        WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
        spin_unlock_irqrestore(&observed.lock, flags);
}
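
/*
 * A minimal sketch (registration code is not shown in this excerpt) of how
 * such a probe can be attached to the 'console' tracepoint declared in
 * <trace/events/printk.h>. Only the register/unregister helpers generated for
 * that tracepoint are assumed; names of the init/exit functions are
 * hypothetical.
 */
#if 0 /* illustrative only */
static int __init example_probe_init(void)
{
        /* Invokes probe_console(NULL, buf, len) for each console line. */
        return register_trace_console(probe_console, NULL);
}

static void __exit example_probe_exit(void)
{
        unregister_trace_console(probe_console, NULL);
        tracepoint_synchronize_unregister(); /* Wait for in-flight probes. */
}
#endif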

/* Check if a report related to the test exists. */
__no_kcsan
static bool report_available(void)
{
        return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Report information we expect in a report. */
struct expect_report {
        /* Access information of both accesses. */
        struct {
                void *fn;    /* Function pointer to expected function of top frame. */
                void *addr;  /* Address of access; unchecked if NULL. */
                size_t size; /* Size of access; unchecked if @addr is NULL. */
                int type;    /* Access type, see KCSAN_ACCESS definitions. */
        } access[2];
};

/* Check observed report matches information in @r. */
__no_kcsan
static bool __report_matches(const struct expect_report *r)
{
        const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
        bool ret = false;
        unsigned long flags;
        typeof(observed.lines) expect;
        const char *end;
        char *cur;
        int i;

        /* Double-checked locking. */
        if (!report_available())
                return false;

        /* Generate expected report contents. */

        /* Title */
        cur = expect[0];
        end = &expect[0][sizeof(expect[0]) - 1];
        cur += scnprintf(cur, end - cur, "BUG: KCSAN: %s in ",
                         is_assert ? "assert: race" : "data-race");
        if (r->access[1].fn) {
                char tmp[2][64];
                int cmp;

                /* Expect lexicographically sorted function names in title. */
                scnprintf(tmp[0], sizeof(tmp[0]), "%pS", r->access[0].fn);
                scnprintf(tmp[1], sizeof(tmp[1]), "%pS", r->access[1].fn);
                cmp = strcmp(tmp[0], tmp[1]);
                cur += scnprintf(cur, end - cur, "%ps / %ps",
                                 cmp < 0 ? r->access[0].fn : r->access[1].fn,
                                 cmp < 0 ? r->access[1].fn : r->access[0].fn);
        } else {
                scnprintf(cur, end - cur, "%pS", r->access[0].fn);
                /* The exact offset won't match, remove it. */
                cur = strchr(expect[0], '+');
                if (cur)
                        *cur = '\0';
        }

        /* Access 1 */
        cur = expect[1];
        end = &expect[1][sizeof(expect[1]) - 1];
        if (!r->access[1].fn)
                cur += scnprintf(cur, end - cur, "race at unknown origin, with ");

        /* Access 1 & 2 */
        for (i = 0; i < 2; ++i) {
                const int ty = r->access[i].type;
                const char *const access_type =
                        (ty & KCSAN_ACCESS_ASSERT) ?
                                      ((ty & KCSAN_ACCESS_WRITE) ?
                                               "assert no accesses" :
                                               "assert no writes") :
                                      ((ty & KCSAN_ACCESS_WRITE) ?
                                               ((ty & KCSAN_ACCESS_COMPOUND) ?
                                                        "read-write" :
                                                        "write") :
                                               "read");
                const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
                const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
                const char *const access_type_aux =
                                (is_atomic && is_scoped)        ? " (marked, reordered)"
                                : (is_atomic                    ? " (marked)"
                                   : (is_scoped                 ? " (reordered)" : ""));

                if (i == 1) {
                        /* Access 2 */
                        cur = expect[2];
                        end = &expect[2][sizeof(expect[2]) - 1];

                        if (!r->access[1].fn) {
                                /* Dummy string if no second access is available. */
                                strcpy(cur, "<none>");
                                break;
                        }
                }

                cur += scnprintf(cur, end - cur, "%s%s to ", access_type,
                                 access_type_aux);

                if (r->access[i].addr) /* Address is optional. */
                        cur += scnprintf(cur, end - cur, "0x%px of %zu bytes",
                                         r->access[i].addr, r->access[i].size);
        }

        spin_lock_irqsave(&observed.lock, flags);
        if (!report_available())
                goto out; /* A new report is being captured. */

        /* Finally match expected output to what we actually observed. */
        ret = strstr(observed.lines[0], expect[0]) &&
              /* Access info may appear in any order. */
              ((strstr(observed.lines[1], expect[1]) &&
                strstr(observed.lines[2], expect[2])) ||
               (strstr(observed.lines[1], expect[2]) &&
                strstr(observed.lines[2], expect[1])));
out:
        spin_unlock_irqrestore(&observed.lock, flags);
        return ret;
}
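
/*
 * For illustration, given the format strings above, __report_matches() would
 * accept a report whose lines of interest look roughly like the following
 * (addresses and task info vary; the "by task .." tails are not part of the
 * generated expectation and are matched only via substring search):
 *
 *   BUG: KCSAN: data-race in test_kernel_read / test_kernel_write
 *
 *   write to 0xffffffff86b7f380 of 8 bytes by task 1107 on cpu 6:
 *   read to 0xffffffff86b7f380 of 8 bytes by task 1108 on cpu 2:
 */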

static __always_inline const struct expect_report *
__report_set_scoped(struct expect_report *r, int accesses)
{
        BUILD_BUG_ON(accesses > 3);

        if (accesses & 1)
                r->access[0].type |= KCSAN_ACCESS_SCOPED;
        else
                r->access[0].type &= ~KCSAN_ACCESS_SCOPED;

        if (accesses & 2)
                r->access[1].type |= KCSAN_ACCESS_SCOPED;
        else
                r->access[1].type &= ~KCSAN_ACCESS_SCOPED;

        return r;
}

__no_kcsan
static bool report_matches_any_reordered(struct expect_report *r)
{
        return __report_matches(__report_set_scoped(r, 0)) ||
               __report_matches(__report_set_scoped(r, 1)) ||
               __report_matches(__report_set_scoped(r, 2)) ||
               __report_matches(__report_set_scoped(r, 3));
}

#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Due to reordering accesses, any access may appear as "(reordered)". */
#define report_matches report_matches_any_reordered
#else
#define report_matches __report_matches
#endif

/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
        long val[8];
} test_struct;
static DEFINE_SEQLOCK(test_seqlock);
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_MUTEX(test_mutex);

/*
 * Helper to avoid the compiler optimizing out reads, and to generate source
 * values for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }

/*
 * Generates a delay and some accesses that enter the runtime but do not produce
 * data races.
 */
static noinline void test_delay(int iter)
{
        while (iter--)
                sink_value(READ_ONCE(test_sink));
}

static noinline void test_kernel_read(void) { sink_value(test_var); }

static noinline void test_kernel_write(void)
{
        test_var = READ_ONCE_NOCHECK(test_sink) + 1;
}

static noinline void test_kernel_write_nochange(void) { test_var = 42; }

/* Suffixed so the function name matches the value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) { test_var = 42; }

static noinline void test_kernel_read_atomic(void)
{
        sink_value(READ_ONCE(test_var));
}

static noinline void test_kernel_write_atomic(void)
{
        WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
}

static noinline void test_kernel_atomic_rmw(void)
{
        /* Use a builtin, so we can set up the "bad" atomic/non-atomic scenario. */
        __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}

__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) { test_var++; }

static noinline void test_kernel_data_race(void) { data_race(test_var++); }

static noinline void test_kernel_assert_writer(void)
{
        ASSERT_EXCLUSIVE_WRITER(test_var);
}

static noinline void test_kernel_assert_access(void)
{
        ASSERT_EXCLUSIVE_ACCESS(test_var);
}

#define TEST_CHANGE_BITS 0xff00ff00

static noinline void test_kernel_change_bits(void)
{
        if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) {
                /*
                 * Avoid a race of unknown origin for this test; just pretend
                 * the accesses are atomic.
                 */
                kcsan_nestable_atomic_begin();
                test_var ^= TEST_CHANGE_BITS;
                kcsan_nestable_atomic_end();
        } else
                WRITE_ONCE(test_var, READ_ONCE(test_var) ^ TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_change(void)
{
        ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);
}

static noinline void test_kernel_assert_bits_nochange(void)
{
        ASSERT_EXCLUSIVE_BITS(test_var, ~TEST_CHANGE_BITS);
}

/*
 * Scoped assertions do trigger anywhere in scope. However, the report should
 * still only point at the start of the scope.
 */
static noinline void test_enter_scope(void)
{
        int x = 0;

        /* Accesses unrelated to the scoped assert. */
        READ_ONCE(test_sink);
        kcsan_check_read(&x, sizeof(x));
}

static noinline void test_kernel_assert_writer_scoped(void)
{
        ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
        test_enter_scope();
}

static noinline void test_kernel_assert_access_scoped(void)
{
        ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_var);
        test_enter_scope();
}

static noinline void test_kernel_rmw_array(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(test_array); ++i)
                test_array[i]++;
}

static noinline void test_kernel_write_struct(void)
{
        kcsan_check_write(&test_struct, sizeof(test_struct));
        kcsan_disable_current();
        test_struct.val[3]++; /* induce value change */
        kcsan_enable_current();
}

static noinline void test_kernel_write_struct_part(void)
{
        test_struct.val[3] = 42;
}

static noinline void test_kernel_read_struct_zero_size(void)
{
        kcsan_check_read(&test_struct.val[3], 0);
}

static noinline void test_kernel_jiffies_reader(void)
{
        sink_value((long)jiffies);
}

static noinline void test_kernel_seqlock_reader(void)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&test_seqlock);
                sink_value(test_var);
        } while (read_seqretry(&test_seqlock, seq));
}

static noinline void test_kernel_seqlock_writer(void)
{
        unsigned long flags;

        write_seqlock_irqsave(&test_seqlock, flags);
        test_var++;
        write_sequnlock_irqrestore(&test_seqlock, flags);
}

static noinline void test_kernel_atomic_builtins(void)
{
        /*
         * Generate concurrent accesses, expecting no reports, ensuring KCSAN
         * treats builtin atomics as actually atomic.
         */
        __atomic_load_n(&test_var, __ATOMIC_RELAXED);
}

static noinline void test_kernel_xor_1bit(void)
{
        /* Do not report data races between the read-writes. */
        kcsan_nestable_atomic_begin();
        test_var ^= 0x10000;
        kcsan_nestable_atomic_end();
}

#define TEST_KERNEL_LOCKED(name, acquire, release)              \
        static noinline void test_kernel_##name(void)           \
        {                                                       \
                long *flag = &test_struct.val[0];               \
                long v = 0;                                     \
                if (!(acquire))                                 \
                        return;                                 \
                while (v++ < 100) {                             \
                        test_var++;                             \
                        barrier();                              \
                }                                               \
                release;                                        \
                test_delay(10);                                 \
        }

TEST_KERNEL_LOCKED(with_memorder,
                   cmpxchg_acquire(flag, 0, 1) == 0,
                   smp_store_release(flag, 0));
TEST_KERNEL_LOCKED(wrong_memorder,
                   cmpxchg_relaxed(flag, 0, 1) == 0,
                   WRITE_ONCE(*flag, 0));
TEST_KERNEL_LOCKED(atomic_builtin_with_memorder,
                   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED),
                   __atomic_store_n(flag, 0, __ATOMIC_RELEASE));
TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder,
                   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED),
                   __atomic_store_n(flag, 0, __ATOMIC_RELAXED));
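
/*
 * For reference, TEST_KERNEL_LOCKED(with_memorder, ...) above expands to a
 * test kernel equivalent to the following: a trivial test-and-set "lock"
 * around plain accesses to test_var.
 */
#if 0 /* expansion shown for illustration only */
static noinline void test_kernel_with_memorder(void)
{
        long *flag = &test_struct.val[0];
        long v = 0;

        if (!(cmpxchg_acquire(flag, 0, 1) == 0))
                return; /* Lock not acquired; retried on the next invocation. */
        while (v++ < 100) {
                test_var++; /* Critical section: plain accesses. */
                barrier();
        }
        smp_store_release(flag, 0); /* Unlock. */
        test_delay(10);
}
#endif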

/* ===== Test cases ===== */

/*
 * Tests that various barriers have the expected effect on internal state. Not
 * exhaustive on atomic_t operations. Unlike the selftest, this also checks for
 * too-strict barrier instrumentation; such cases can be tolerated, because they
 * do not cause false positives, but we should at least be aware of them.
 */
static void test_barrier_nothreads(struct kunit *test)
{
#ifdef CONFIG_KCSAN_WEAK_MEMORY
        struct kcsan_scoped_access *reorder_access = &current->kcsan_ctx.reorder_access;
#else
        struct kcsan_scoped_access *reorder_access = NULL;
#endif
        arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
        atomic_t dummy;

        KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
        KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));

#define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name)                        \
        do {                                                                                    \
                reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED;                     \
                reorder_access->size = sizeof(test_var);                                        \
                barrier;                                                                        \
                KUNIT_EXPECT_EQ_MSG(test, reorder_access->size,                                 \
                                    order_before ? 0 : sizeof(test_var),                        \
                                    "improperly instrumented type=(" #access_type "): " name);  \
        } while (0)
#define KCSAN_EXPECT_READ_BARRIER(b, o)  __KCSAN_EXPECT_BARRIER(0, b, o, #b)
#define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)
#define KCSAN_EXPECT_RW_BARRIER(b, o)    __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b)
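/*
 * Illustrative note: e.g. KCSAN_EXPECT_READ_BARRIER(smp_mb(), true) plants a
 * pending reordered read in reorder_access, executes smp_mb(), and then
 * asserts that the barrier was instrumented strongly enough to flush the
 * pending access (i.e. reorder_access->size was reset to 0).
 */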

        /*
         * Lockdep initialization can strengthen certain locking operations due
         * to calling into instrumented files; "warm up" our locks.
         */
        spin_lock(&test_spinlock);
        spin_unlock(&test_spinlock);
        mutex_lock(&test_mutex);
        mutex_unlock(&test_mutex);

        /* Force creating a valid entry in reorder_access first. */
        test_var = 0;
        while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
                __kcsan_check_read(&test_var, sizeof(test_var));
        KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var));

        kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */

        KCSAN_EXPECT_READ_BARRIER(mb(), true);
        KCSAN_EXPECT_READ_BARRIER(wmb(), false);
        KCSAN_EXPECT_READ_BARRIER(rmb(), true);
        KCSAN_EXPECT_READ_BARRIER(smp_mb(), true);
        KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false);
        KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true);
        KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false);
        KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true);
        KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true);
        KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true);
        KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);
        KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true);
        KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false);
        KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true);
        KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true);
        KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true);
        KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false);
        KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0, 0), true);
        KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
        KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true);
        KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true);
        KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true);
        KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true);
        KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
        KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true);
        KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
        KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true);
        KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true);
        KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true);
        KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true);
        KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true);
        KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false);
        KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true);
        KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false);
        KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true);
        KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false);
        KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true);

        KCSAN_EXPECT_WRITE_BARRIER(mb(), true);
        KCSAN_EXPECT_WRITE_BARRIER(wmb(), true);
        KCSAN_EXPECT_WRITE_BARRIER(rmb(), false);
        KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true);
        KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true);
        KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false);
        KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true);
        KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false);
        KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true);
        KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true);
        KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);
        KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false);
        KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false);
        KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true);
        KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
        KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false);
        KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true);
        KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false);
        KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true);
        KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false);
        KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true);

        KCSAN_EXPECT_RW_BARRIER(mb(), true);
        KCSAN_EXPECT_RW_BARRIER(wmb(), true);
        KCSAN_EXPECT_RW_BARRIER(rmb(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_mb(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true);
        KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true);
        KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);
        KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true);
        KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false);
        KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true);
        KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true);
        KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true);
        KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false);
        KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0, 0), true);
        KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
        KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true);
        KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true);
        KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true);
        KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true);
        KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
        KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true);
        KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
        KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false);
        KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true);
        KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false);
        KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
        KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
        KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);

#ifdef clear_bit_unlock_is_negative_byte
        KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
        KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
        KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
#endif
        kcsan_nestable_atomic_end();
}

/* Simple test with normal data race. */
__no_kcsan
static void test_basic(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        struct expect_report never = {
                .access = {
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        bool match_expect = false;
        bool match_never = false;

        begin_test_checks(test_kernel_write, test_kernel_read);
        do {
                match_expect |= report_matches(&expect);
                match_never = report_matches(&never);
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_TRUE(test, match_expect);
        KUNIT_EXPECT_FALSE(test, match_never);
}

/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        /* NULL will match any address. */
                        { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
                        { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
                },
        };
        struct expect_report never = {
                .access = {
                        { test_kernel_rmw_array, NULL, 0, 0 },
                        { test_kernel_rmw_array, NULL, 0, 0 },
                },
        };
        bool match_expect = false;
        bool match_never = false;

        begin_test_checks(test_kernel_rmw_array, test_kernel_rmw_array);
        do {
                match_expect |= report_matches(&expect);
                match_never |= report_matches(&never);
        } while (!end_test_checks(false));
        KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check matches exist. */
        KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{
        struct expect_report expect_rw = {
                .access = {
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        struct expect_report expect_ww = {
                .access = {
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect = false;

        test_kernel_write_nochange(); /* Reset value. */
        begin_test_checks(test_kernel_write_nochange, test_kernel_read);
        do {
                match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
        } while (!end_test_checks(match_expect));
        if (IS_ENABLED(CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY))
                KUNIT_EXPECT_FALSE(test, match_expect);
        else
                KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that the exceptions under which the KCSAN_REPORT_VALUE_CHANGE_ONLY
 * option must never apply actually work.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{
        struct expect_report expect_rw = {
                .access = {
                        { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        struct expect_report expect_ww = {
                .access = {
                        { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect = false;

        test_kernel_write_nochange_rcu(); /* Reset value. */
        begin_test_checks(test_kernel_write_nochange_rcu, test_kernel_read);
        do {
                match_expect = report_matches(&expect_rw) || report_matches(&expect_ww);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                        { NULL },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_write_uninstrumented, test_kernel_read);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
                KUNIT_EXPECT_TRUE(test, match_expect);
        else
                KUNIT_EXPECT_FALSE(test, match_expect);
}

/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                        { test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_write, test_kernel_write);
        do {
                sink_value(READ_ONCE(test_var)); /* induce value-change */
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC))
                KUNIT_EXPECT_FALSE(test, match_expect);
        else
                KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
                        { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_write_struct, test_kernel_write_struct);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

/*
 * Test that data races where only one write is larger than word-size are
 * always reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
                        { test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_write_struct, test_kernel_write_struct_part);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that races with atomic accesses never result in reports. */
__no_kcsan
static void test_read_atomic_write_atomic(struct kunit *test)
{
        bool match_never = false;

        begin_test_checks(test_kernel_read_atomic, test_kernel_write_atomic);
        do {
                match_never = report_available();
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that a race between an atomic and a plain access results in a report. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                        { test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
                },
        };
        bool match_expect = false;

        KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

        begin_test_checks(test_kernel_read, test_kernel_write_atomic);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Test that atomic RMWs generate a correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                        { test_kernel_atomic_rmw, &test_var, sizeof(test_var),
                                KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
                },
        };
        bool match_expect = false;

        KCSAN_TEST_REQUIRES(test, !IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS));

        begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
                        { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
                },
        };
        struct expect_report never = {
                .access = {
                        { test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
                        { test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
                },
        };
        bool match_expect = false;
        bool match_never = false;

        begin_test_checks(test_kernel_write_struct, test_kernel_read_struct_zero_size);
        do {
                match_expect |= report_matches(&expect);
                match_never = report_matches(&never);
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_TRUE(test, match_expect); /* Sanity check. */
        KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test the data_race() macro. */
__no_kcsan
static void test_data_race(struct kunit *test)
{
        bool match_never = false;

        begin_test_checks(test_kernel_data_race, test_kernel_data_race);
        do {
                match_never = report_available();
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_FALSE(test, match_never);
}
__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_assert_writer, test_kernel_write_nochange);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_assert_access, test_kernel_read);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{
        struct expect_report expect_access_writer = {
                .access = {
                        { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
                        { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
                },
        };
        struct expect_report expect_access_access = {
                .access = {
                        { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
                        { test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
                },
        };
        struct expect_report never = {
                .access = {
                        { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
                        { test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
                },
        };
        bool match_expect_access_writer = false;
        bool match_expect_access_access = false;
        bool match_never = false;

        begin_test_checks(test_kernel_assert_access, test_kernel_assert_writer);
        do {
                match_expect_access_writer |= report_matches(&expect_access_writer);
                match_expect_access_access |= report_matches(&expect_access_access);
                match_never |= report_matches(&never);
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_TRUE(test, match_expect_access_writer);
        KUNIT_EXPECT_TRUE(test, match_expect_access_access);
        KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
                        { test_kernel_change_bits, &test_var, sizeof(test_var),
                                KCSAN_ACCESS_WRITE | (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) ? 0 : KCSAN_ACCESS_ATOMIC) },
                },
        };
        bool match_expect = false;

        begin_test_checks(test_kernel_assert_bits_change, test_kernel_change_bits);
        do {
                match_expect = report_matches(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_TRUE(test, match_expect);
}

__no_kcsan
static void test_assert_exclusive_bits_nochange(struct kunit *test)
{
        bool match_never = false;

        begin_test_checks(test_kernel_assert_bits_nochange, test_kernel_change_bits);
        do {
                match_never = report_available();
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_FALSE(test, match_never);
}

__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{
        struct expect_report expect_start = {
                .access = {
                        { test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        struct expect_report expect_inscope = {
                .access = {
                        { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
                        { test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
                },
        };
        bool match_expect_start = false;
        bool match_expect_inscope = false;

        begin_test_checks(test_kernel_assert_writer_scoped, test_kernel_write_nochange);
        do {
                match_expect_start |= report_matches(&expect_start);
                match_expect_inscope |= report_matches(&expect_inscope);
        } while (!end_test_checks(match_expect_inscope));
        KUNIT_EXPECT_TRUE(test, match_expect_start);
        KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}

__no_kcsan
static void test_assert_exclusive_access_scoped(struct kunit *test)
{
        struct expect_report expect_start1 = {
                .access = {
                        { test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        struct expect_report expect_start2 = {
                .access = { expect_start1.access[0], expect_start1.access[0] },
        };
        struct expect_report expect_inscope = {
                .access = {
                        { test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                },
        };
        bool match_expect_start = false;
        bool match_expect_inscope = false;

        begin_test_checks(test_kernel_assert_access_scoped, test_kernel_read);
        end_time += msecs_to_jiffies(1000); /* This test requires a bit more time. */
        do {
                match_expect_start |= report_matches(&expect_start1) || report_matches(&expect_start2);
                match_expect_inscope |= report_matches(&expect_inscope);
        } while (!end_test_checks(match_expect_inscope));
        KUNIT_EXPECT_TRUE(test, match_expect_start);
        KUNIT_EXPECT_FALSE(test, match_expect_inscope);
}

/*
 * jiffies is special (declared to be volatile) and its accesses are typically
 * not marked; this test ensures that neither the compiler nor KCSAN gets
 * confused by jiffies' declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{
        bool match_never = false;

        begin_test_checks(test_kernel_jiffies_reader, test_kernel_jiffies_reader);
        do {
                match_never = report_available();
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_FALSE(test, match_never);
}

/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan
static void test_seqlock_noreport(struct kunit *test)
{
        bool match_never = false;

        begin_test_checks(test_kernel_seqlock_reader, test_kernel_seqlock_writer);
        do {
                match_never = report_available();
        } while (!end_test_checks(match_never));
        KUNIT_EXPECT_FALSE(test, match_never);
}
1201/*
1202 * Test atomic builtins work and required instrumentation functions exist. We
1203 * also test that KCSAN understands they're atomic by racing with them via
1204 * test_kernel_atomic_builtins(), and expect no reports.
1205 *
1206 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
1207 */
1208static void test_atomic_builtins(struct kunit *test)
1209{
1210        bool match_never = false;
1211
1212        begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
1213        do {
1214                long tmp;
1215
1216                kcsan_enable_current();
1217
1218                __atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
1219                KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));
1220
1221                KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
1222                KUNIT_EXPECT_EQ(test, 20L, test_var);
1223
1224                tmp = 20L;
1225                KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
1226                                                                    0, __ATOMIC_RELAXED,
1227                                                                    __ATOMIC_RELAXED));
1228                KUNIT_EXPECT_EQ(test, tmp, 20L);
1229                KUNIT_EXPECT_EQ(test, test_var, 30L);
1230                KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
1231                                                                     1, __ATOMIC_RELAXED,
1232                                                                     __ATOMIC_RELAXED));
1233                KUNIT_EXPECT_EQ(test, tmp, 30L);
1234                KUNIT_EXPECT_EQ(test, test_var, 30L);
1235
1236                KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
1237                KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
1238                KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
1239                KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
1240                KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
1241                KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
1242                KUNIT_EXPECT_EQ(test, -2L, test_var);
1243
1244                __atomic_thread_fence(__ATOMIC_SEQ_CST);
1245                __atomic_signal_fence(__ATOMIC_SEQ_CST);
1246
1247                kcsan_disable_current();
1248
1249                match_never = report_available();
1250        } while (!end_test_checks(match_never));
1251        KUNIT_EXPECT_FALSE(test, match_never);
1252}
1253
__no_kcsan
static void test_1bit_value_change(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
                        { test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
                },
        };
        bool match = false;

        begin_test_checks(test_kernel_read, test_kernel_xor_1bit);
        do {
                match = IS_ENABLED(CONFIG_KCSAN_PERMISSIVE)
                                ? report_available()
                                : report_matches(&expect);
        } while (!end_test_checks(match));
        if (IS_ENABLED(CONFIG_KCSAN_PERMISSIVE))
                KUNIT_EXPECT_FALSE(test, match);
        else
                KUNIT_EXPECT_TRUE(test, match);
}

__no_kcsan
static void test_correct_barrier(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
                        { test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
                },
        };
        bool match_expect = false;

        test_struct.val[0] = 0; /* init unlocked */
        begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder);
        do {
                match_expect = report_matches_any_reordered(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_missing_barrier(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
                        { test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
                },
        };
        bool match_expect = false;

        test_struct.val[0] = 0; /* init unlocked */
        begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder);
        do {
                match_expect = report_matches_any_reordered(&expect);
        } while (!end_test_checks(match_expect));
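        /*
         * Only the weak memory modeling runtime (CONFIG_KCSAN_WEAK_MEMORY)
         * can detect the missing barrier; otherwise no report is expected.
         */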
        if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
                KUNIT_EXPECT_TRUE(test, match_expect);
        else
                KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_atomic_builtins_correct_barrier(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
                        { test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
                },
        };
        bool match_expect = false;

        test_struct.val[0] = 0; /* init unlocked */
        begin_test_checks(test_kernel_atomic_builtin_with_memorder,
                          test_kernel_atomic_builtin_with_memorder);
        do {
                match_expect = report_matches_any_reordered(&expect);
        } while (!end_test_checks(match_expect));
        KUNIT_EXPECT_FALSE(test, match_expect);
}

__no_kcsan
static void test_atomic_builtins_missing_barrier(struct kunit *test)
{
        struct expect_report expect = {
                .access = {
                        { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
                        { test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
                },
        };
        bool match_expect = false;

        test_struct.val[0] = 0; /* init unlocked */
        begin_test_checks(test_kernel_atomic_builtin_wrong_memorder,
                          test_kernel_atomic_builtin_wrong_memorder);
        do {
                match_expect = report_matches_any_reordered(&expect);
        } while (!end_test_checks(match_expect));
        if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
                KUNIT_EXPECT_TRUE(test, match_expect);
        else
                KUNIT_EXPECT_FALSE(test, match_expect);
}

/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
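 *
 * Absent the CPU limits applied below, the full sequence generated is:
 * 2, 3, 4, 5, 8, 16, 32.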
 */
static const void *nthreads_gen_params(const void *prev, char *desc)
{
        long nthreads = (long)prev;

        if (nthreads < 0 || nthreads >= 32)
                nthreads = 0; /* stop */
        else if (!nthreads)
                nthreads = 2; /* initial value */
        else if (nthreads < 5)
                nthreads++;
        else if (nthreads == 5)
                nthreads = 8;
        else
                nthreads *= 2;

        if (!preempt_model_preemptible() ||
            !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
                /*
                 * Without any preemption, keep 2 CPUs free for other tasks, one
                 * of which is the main test case function checking for
                 * completion or failure.
                 */
                const long min_unused_cpus = preempt_model_none() ? 2 : 0;
                const long min_required_cpus = 2 + min_unused_cpus;

                if (num_online_cpus() < min_required_cpus) {
                        pr_err_once("Too few online CPUs (%u < %ld) for test\n",
                                    num_online_cpus(), min_required_cpus);
                        nthreads = 0;
                } else if (nthreads >= num_online_cpus() - min_unused_cpus) {
                        /* Use negative value to indicate last param. */
                        nthreads = -(num_online_cpus() - min_unused_cpus);
                        pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
                                     -nthreads, num_online_cpus());
                }
        }

        snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
        return (void *)nthreads;
}

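/* Parameterized cases below run once per thread count from nthreads_gen_params(). */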
#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = {
        KUNIT_CASE(test_barrier_nothreads),
        KCSAN_KUNIT_CASE(test_basic),
        KCSAN_KUNIT_CASE(test_concurrent_races),
        KCSAN_KUNIT_CASE(test_novalue_change),
        KCSAN_KUNIT_CASE(test_novalue_change_exception),
        KCSAN_KUNIT_CASE(test_unknown_origin),
        KCSAN_KUNIT_CASE(test_write_write_assume_atomic),
        KCSAN_KUNIT_CASE(test_write_write_struct),
        KCSAN_KUNIT_CASE(test_write_write_struct_part),
        KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
        KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
        KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
        KCSAN_KUNIT_CASE(test_zero_size_access),
        KCSAN_KUNIT_CASE(test_data_race),
        KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
        KCSAN_KUNIT_CASE(test_assert_exclusive_access),
        KCSAN_KUNIT_CASE(test_assert_exclusive_access_writer),
        KCSAN_KUNIT_CASE(test_assert_exclusive_bits_change),
        KCSAN_KUNIT_CASE(test_assert_exclusive_bits_nochange),
        KCSAN_KUNIT_CASE(test_assert_exclusive_writer_scoped),
        KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
        KCSAN_KUNIT_CASE(test_jiffies_noreport),
        KCSAN_KUNIT_CASE(test_seqlock_noreport),
        KCSAN_KUNIT_CASE(test_atomic_builtins),
        KCSAN_KUNIT_CASE(test_1bit_value_change),
        KCSAN_KUNIT_CASE(test_correct_barrier),
        KCSAN_KUNIT_CASE(test_missing_barrier),
        KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier),
        KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier),
        {},
};

/* ===== End test cases ===== */

/* Concurrent accesses from interrupts. */
__no_kcsan
static void access_thread_timer(struct timer_list *timer)
{
        static atomic_t cnt = ATOMIC_INIT(0);
        unsigned int idx;
        void (*func)(void);

        idx = (unsigned int)atomic_inc_return(&cnt) % ARRAY_SIZE(access_kernels);
        /* Acquire potential initialization. */
        func = smp_load_acquire(&access_kernels[idx]);
        if (func)
                func();
}

/* The main loop for each thread. */
__no_kcsan
static int access_thread(void *arg)
{
        struct timer_list timer;
        unsigned int cnt = 0;
        unsigned int idx;
        void (*func)(void);

        timer_setup_on_stack(&timer, access_thread_timer, 0);
        do {
                might_sleep();

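                /*
                 * Either re-arm the timer, so that some accesses also run
                 * from interrupt context via access_thread_timer(), or run
                 * one of the access kernels directly in thread context.
                 */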
                if (!timer_pending(&timer))
                        mod_timer(&timer, jiffies + 1);
                else {
                        /* Iterate through all kernels. */
                        idx = cnt++ % ARRAY_SIZE(access_kernels);
                        /* Acquire potential initialization. */
                        func = smp_load_acquire(&access_kernels[idx]);
                        if (func)
                                func();
                }
        } while (!torture_must_stop());
        del_timer_sync(&timer);
        destroy_timer_on_stack(&timer);

        torture_kthread_stopping("access_thread");
        return 0;
}

__no_kcsan
static int test_init(struct kunit *test)
{
        unsigned long flags;
        int nthreads;
        int i;

        spin_lock_irqsave(&observed.lock, flags);
        for (i = 0; i < ARRAY_SIZE(observed.lines); ++i)
                observed.lines[i][0] = '\0';
        observed.nlines = 0;
        spin_unlock_irqrestore(&observed.lock, flags);

        if (strstr(test->name, "nothreads"))
                return 0;

        if (!torture_init_begin((char *)test->name, 1))
                return -EBUSY;

        if (WARN_ON(threads))
                goto err;

        for (i = 0; i < ARRAY_SIZE(access_kernels); ++i) {
                if (WARN_ON(access_kernels[i]))
                        goto err;
        }

        nthreads = abs((long)test->param_value);
        if (WARN_ON(!nthreads))
                goto err;

        threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
        if (WARN_ON(!threads))
                goto err;

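        /* NULL-terminate the list, so that test_exit() can stop at its end. */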
        threads[nthreads] = NULL;
        for (i = 0; i < nthreads; ++i) {
                if (torture_create_kthread(access_thread, NULL, threads[i]))
                        goto err;
        }

        torture_init_end();

        return 0;

err:
        kfree(threads);
        threads = NULL;
        torture_init_end();
        return -EINVAL;
}

__no_kcsan
static void test_exit(struct kunit *test)
{
        struct task_struct **stop_thread;
        int i;

        if (strstr(test->name, "nothreads"))
                return;

        if (torture_cleanup_begin())
                return;

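        /* Stop generating further accesses before stopping the threads. */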
        for (i = 0; i < ARRAY_SIZE(access_kernels); ++i)
                WRITE_ONCE(access_kernels[i], NULL);

        if (threads) {
                for (stop_thread = threads; *stop_thread; stop_thread++)
                        torture_stop_kthread(access_thread, *stop_thread);

                kfree(threads);
                threads = NULL;
        }

        torture_cleanup_end();
}

__no_kcsan
static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
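        /* Compile-time check that probe_console() matches the tracepoint's type. */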
        check_trace_callback_type_console(probe_console);
        if (!strcmp(tp->name, "console"))
                WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

__no_kcsan
static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
        if (!strcmp(tp->name, "console"))
                tracepoint_probe_unregister(tp, probe_console, NULL);
}

static int kcsan_suite_init(struct kunit_suite *suite)
{
        /*
         * Because we want to be able to build the test as a module, we need to
         * iterate through all known tracepoints, since the static registration
         * won't work here.
         */
        for_each_kernel_tracepoint(register_tracepoints, NULL);
        return 0;
}

static void kcsan_suite_exit(struct kunit_suite *suite)
{
        for_each_kernel_tracepoint(unregister_tracepoints, NULL);
        tracepoint_synchronize_unregister();
}

static struct kunit_suite kcsan_test_suite = {
        .name = "kcsan",
        .test_cases = kcsan_test_cases,
        .init = test_init,
        .exit = test_exit,
        .suite_init = kcsan_suite_init,
        .suite_exit = kcsan_suite_exit,
};

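/*
 * Register the suite with KUnit: the tests run at boot when built in, or on
 * module load when built as a module.
 */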
kunit_test_suites(&kcsan_test_suite);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <elver@google.com>");