linux/kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

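/*
 * Entry types the selftests may legitimately find in the trace
 * buffer. Anything else means the buffer is corrupted.
 */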
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

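/*
 * Consume every event queued on a single CPU and check that each
 * one is a valid trace entry. Returns -1 (and disables tracing)
 * if the buffer is corrupted.
 */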
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries.
                 * If we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * trace_test_buffer_cpu() runs a while loop to consume all data.
         * If the calling tracer is broken and keeps filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

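/*
 * The probes below just count how many times they are called.
 * trace_selftest_ops() uses them to verify that ftrace filters
 * attach each ftrace_ops to the right functions.
 */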
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct pt_regs *pt_regs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct pt_regs *pt_regs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
        .func           = trace_selftest_test_global_func,
        .flags          = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

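/*
 * Register several filtered ftrace_ops at once, call the two test
 * functions, and check that every counter advanced exactly as the
 * filters dictate. A dynamically allocated ops is added part way
 * through to exercise the dynamic ops code path as well.
 */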
static int trace_selftest_ops(int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        register_ftrace_function(&test_global);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (trace_selftest_test_global_cnt == 0)
                goto out;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (trace_selftest_test_global_cnt == 0)
                goto out_free;
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        unregister_ftrace_function(&test_global);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct pt_regs *pt_regs)
{
        /*
         * This function is registered without the recursion safe flag.
         * The ftrace infrastructure should provide the recursion
         * protection. If not, this will crash the kernel!
         */
        trace_selftest_recursion_cnt++;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct pt_regs *pt_regs)
{
        /*
         * We said we would provide our own recursion protection.
         * By calling this function again, we should recurse back
         * into this function and count again. But this only happens
         * if the arch supports all ftrace features and nothing else
         * is using the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func                   = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
        .func                   = trace_selftest_test_recursion_safe_func,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE,
};

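/*
 * First verify that a callback registered without the recursion
 * safe flag is protected against recursion by ftrace itself, then
 * verify that a recursion safe callback really is allowed to recurse.
 */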
static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        char *func_name;
        int len;
        int ret;
        int cnt;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 1) {
                pr_cont("*callback not called once (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        /* Reset the count so the recursion safe test starts from zero */
        trace_selftest_recursion_cnt = 0;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        /*
         * If the arch supports all ftrace features, and no other task
         * was on the list, we should be fine.
         */
        if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
                cnt = 2; /* Should have recursed */
        else
                cnt = 1;

        ret = -1;
        if (trace_selftest_recursion_cnt != cnt) {
                pr_cont("*callback not called expected %d times (%d)* ",
                        cnt, trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct pt_regs *pt_regs)
{
        if (pt_regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func           = trace_selftest_test_regs_func,
        .flags          = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

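/*
 * Check that a FTRACE_OPS_FL_SAVE_REGS callback receives a valid
 * pt_regs pointer on arches that support it, and that registering
 * such a callback fails gracefully on arches that do not.
 */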
static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        return ret;
}

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep for 1/10 of a second, and then read the
 * trace buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false, DUMP_ALL);
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                        struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(tr);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
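/*
 * Verify the irqsoff tracer: disable interrupts for a short, known
 * period while tracing, then check that both the live buffer and
 * the max-latency snapshot contain entries.
 */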
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
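/*
 * Verify the preemptoff tracer: disable preemption for a short,
 * known period while tracing, then check that both the live buffer
 * and the max-latency snapshot contain entries.
 */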
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
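/*
 * Verify the preemptirqsoff tracer: exercise overlapping irqs-off
 * and preempt-off sections and check the max-latency snapshot
 * after each pass.
 */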
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning subsequent
         * to buffer flipping failure because tracing_stop()
         * disables the tr and max buffers, making flipping impossible
         * in case of parallel max irqs/preempt off latencies.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
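/*
 * Helper kthread for the wakeup test below: it raises itself to an
 * RT priority, signals readiness, then sleeps until the test wakes
 * it up.
 */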
static int trace_wakeup_test_thread(void *data)
{
        /* Make this an RT thread, doesn't need to be too high */
        static const struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the test know we are running at the new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        complete(x);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

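/*
 * Verify the wakeup tracer: wake an RT thread while tracing and
 * check that the wakeup latency was recorded in the max buffer.
 */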
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the RT thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&isrt);

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&isrt);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
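/*
 * Verify the sched_switch tracer: let normal scheduling run for
 * 1/10 of a second and check that context switches were recorded.
 */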
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
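/*
 * Verify the branch tracer: trace for 1/10 of a second and check
 * that branch entries were recorded.
 */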
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */