linux/kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

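/*
 * The TRACE_* type constants checked above come from enum trace_type
 * in kernel/trace/trace.h; any entry type not listed there is treated
 * as buffer corruption by the test below.
 */
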
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries.
		 * If we loop more times than that, there's something
		 * wrong with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

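/*
 * How these selftests are run (for context; the wiring lives in
 * trace.c and the individual tracers, not in this file): with
 * CONFIG_FTRACE_STARTUP_TEST enabled, register_tracer() invokes the
 * tracer's ->selftest() callback at boot.  A tracer opts in roughly
 * like this:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_SELFTEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */
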
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func		= trace_selftest_test_global_func,
	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

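/*
 * About the flags used above: FTRACE_OPS_FL_RECURSION_SAFE declares
 * that the callback copes with recursion itself, letting the ftrace
 * core skip its own recursion guard when calling it.
 * FTRACE_OPS_FL_GLOBAL attaches the ops to the global list shared
 * with the "function" tracer, filtered via ftrace_set_global_filter()
 * rather than a per-ops filter.  (Rough summary; see ftrace.h for the
 * authoritative definitions.)
 */
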
static void print_counts(void)
{
	printk(KERN_CONT "(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

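/*
 * Exercise several ftrace_ops with different filters at once, plus one
 * ops allocated dynamically, and verify that each callback fires
 * exactly as often as its filter allows.
 */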
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk(KERN_CONT "MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

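/*
 * DYN_FTRACE_TEST_NAME (and ..._NAME2) live in
 * kernel/trace/trace_selftest_dynamic.c.  The trace/ directory is
 * normally built with -pg stripped, so that one file is compiled with
 * -pg restored to make the two test functions traceable -- roughly,
 * in kernel/trace/Makefile:
 *
 *	# selftest needs instrumentation
 *	CFLAGS_trace_selftest_dynamic.o = -pg
 *	obj-y += trace_selftest_dynamic.o
 */
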
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

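/*
 * On a successful boot, the output from this path looks roughly like
 * the following dmesg excerpt (illustrative; exact counts and text
 * vary):
 *
 *	Testing tracer function: PASSED
 *	Testing dynamic ftrace: PASSED
 *	Testing dynamic ftrace ops #1: (1 0 1 x 0) (1 1 2 x 0) ... PASSED
 *	Testing dynamic ftrace ops #2: ... PASSED
 *
 * where x is the (nonzero) global-callback count.
 */
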
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

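/*
 * Conceptually, the protection the core provides for an ops that is
 * NOT marked RECURSION_SAFE resembles a per-task guard bit around the
 * callback (illustrative sketch only; the real guard lives in the
 * ftrace core and is tracked per context):
 *
 *	if (trace_recursion_test(TRACE_INTERNAL_BIT))
 *		return;
 *	trace_recursion_set(TRACE_INTERNAL_BIT);
 *	op->func(ip, parent_ip, op, regs);
 *	trace_recursion_clear(TRACE_INTERNAL_BIT);
 */
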
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	/* Start counting from zero again for the recursion safe test */
	trace_selftest_recursion_cnt = 0;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	/*
	 * The callback recurses into DYN_FTRACE_TEST_NAME() only when
	 * the arch supports all of the ftrace features, so accept one
	 * or two calls.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 1 or 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

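/*
 * FTRACE_OPS_FL_SAVE_REGS asks the arch trampoline to hand a full
 * pt_regs to the callback, and registration fails when the arch
 * lacks CONFIG_DYNAMIC_FTRACE_WITH_REGS.  Adding
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED instead degrades gracefully:
 * the callback is still called, but with NULL regs.  The test below
 * verifies exactly this contract.
 */
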
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? " (no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep for 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid a possible hang */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only need to roughly detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
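
	/*
	 * The ~100us irqs-off window above is what the tracer under
	 * test should now have recorded as its max latency; the
	 * snapshot lands in max_tr, which is checked below.
	 */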

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the tester know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

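/*
 * For context: the wakeup tracer measures the latency from the
 * wake_up_process() of the highest-priority RT task until that task
 * actually starts running.  The RT kthread above exists purely to be
 * that task.
 */
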
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */