linux/kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

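/* An entry is valid only if its type is one the selftest tracers can produce */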
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

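/*
 * Consume every event left on @cpu's ring buffer and check that each
 * one is an entry type the selftests can produce. Returns 0 if the
 * buffer is sane, -1 (with tracing disabled) if it looks corrupted.
 */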
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

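/*
 * Each test probe below just counts how many times it is called; the
 * dynamic-ops selftest compares these counters against the number of
 * hits each probe's filter should have allowed.
 */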
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip)
{
        trace_selftest_test_dyn_cnt++;
}

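/*
 * One ftrace_ops per probe. test_global carries FTRACE_OPS_FL_GLOBAL,
 * so it is registered on the shared global list rather than as a
 * standalone ops.
 */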
static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
};

static struct ftrace_ops test_global = {
        .func                   = trace_selftest_test_global_func,
        .flags                  = FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

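/*
 * Register the three filtered probes and the global one, call the two
 * test functions, and check that each counter advanced exactly as its
 * filter allows; then repeat with a dynamically allocated ftrace_ops
 * thrown in. Returns 0 only if every counter matched at every step.
 */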
static int trace_selftest_ops(int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        register_ftrace_function(&test_global);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (trace_selftest_test_global_cnt == 0)
                goto out;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (trace_selftest_test_global_cnt == 0)
                goto out_free;
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        unregister_ftrace_function(&test_global);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
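/*
 * The flow: enable ftrace, filter on DYN_FTRACE_TEST_NAME only, and
 * verify that 100ms of normal activity records nothing while a single
 * call to the filtered function records exactly one entry. The ops
 * tests above are then run with global tracing both on and off.
 */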
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it out */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(2);

        return ret;
}
#else
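/* Without dynamic ftrace the test is a no-op that reports success (0) */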
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false, DUMP_ALL);
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                        struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback but we attach a watchdog callback
         * to detect and recover from possible hangs
         */
        tracing_reset_online_cpus(tr);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
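/*
 * Disable interrupts for ~100us under the irqsoff tracer and check that
 * the max-latency snapshot buffer (max_tr) picked up the critical section.
 */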
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning from a failed buffer flip:
         * tracing_stop() disables the tr and max buffers, which makes
         * flipping impossible if a max irqs-off latency is being
         * recorded in parallel.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
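/*
 * Same idea as the irqsoff test above, but the critical section is a
 * preempt_disable()/preempt_enable() pair instead.
 */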
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning from a failed buffer flip:
         * tracing_stop() disables the tr and max buffers, which makes
         * flipping impossible if a max preempt-off latency is being
         * recorded in parallel.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
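/*
 * Combined test: overlap a preempt-off and an irqs-off critical section
 * (re-enabling in the opposite order) and verify both the tr and max_tr
 * buffers; the sequence is run twice.
 */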
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning from a failed buffer flip:
         * tracing_stop() disables the tr and max buffers, which makes
         * flipping impossible if a max irqs/preempt-off latency is being
         * recorded in parallel.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
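/*
 * Helper thread for the wakeup tracer test: raise itself to an RT
 * priority, signal the tester, then sleep until woken.
 */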
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a RT thread, doesn't need to be too high */
        static const struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the test know we have a new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

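/*
 * Wakeup tracer test: create the RT thread above, let it go to sleep,
 * wake it, and then check that the wakeup latency was captured in the
 * max-latency snapshot buffer (max_tr).
 */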
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes this is slightly racy. It is possible that, for some
         * strange reason, the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that already is awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case, is that we disable tracing. Honestly, if this
         * race does happen, something is horribly wrong with the system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);


        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
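/*
 * Generic smoke test: run the sched_switch tracer for 100ms and require
 * that at least one entry was recorded.
 */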
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
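/* Same smoke-test pattern as the sched_switch selftest above */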
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
