linux/kernel/trace/trace_selftest.c
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
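
/*
 * Entry types the selftests expect to find in the ring buffer.
 * Anything else is treated as buffer corruption.
 */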
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

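/*
 * Consume every event on one CPU's buffer and sanity-check the type
 * of each entry against the list above.
 */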
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * trace_test_buffer_cpu() loops until it has consumed all data.
         * If the calling tracer is broken and keeps filling the buffer,
         * that loop would run forever and hard lock the box. We disable
         * the ring buffer while we do this test to prevent a hard lockup.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

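/*
 * Each probe below only increments its counter; the test reads the
 * counters back to verify which callbacks fired for which functions.
 */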
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
};

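/*
 * Registered on the global ops list shared with the function tracer;
 * the test only checks that this callback fired at all, not how often.
 */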
static struct ftrace_ops test_global = {
        .func                   = trace_selftest_test_global_func,
        .flags                  = FTRACE_OPS_FL_GLOBAL,
};

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

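/*
 * Register the probes with different filters and verify, via the
 * counters, that each callback fires only for the functions it was
 * filtered on, both before and after adding a dynamically allocated ops.
 */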
static int trace_selftest_ops(int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        register_ftrace_function(&test_global);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (trace_selftest_test_global_cnt == 0)
                goto out;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        /* dyn_ops is registered by now; a failure must unwind via out_free */
        if (trace_selftest_test_global_cnt == 0)
                goto out_free;
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        unregister_ftrace_function(&test_global);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* called via the function pointer parameter so gcc can't optimize it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(2);

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops)
                        __ftrace_dump(false, DUMP_ALL);
                return 0;
        }

        return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                        struct trace_array *tr)
{
        int ret;
        unsigned long count;

        /*
         * Simulate the init() callback, but attach a watchdog callback
         * to detect and recover from possible hangs.
         */
        tracing_reset_online_cpus(tr);
        set_graph_array(tr);
        ret = register_ftrace_graph(&trace_graph_return,
                                    &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for a 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                tracing_selftest_disabled = true;
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


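/*
 * irqsoff test: run a 100us burst with interrupts disabled and check
 * that the latency tracer recorded it in the max buffer.
 */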
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning about failed buffer
         * flipping: tracing_stop() disables the tr and max buffers,
         * which makes flipping impossible while a parallel max
         * irqs-off latency is being recorded.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

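/*
 * preemptoff test: same idea as the irqsoff test, but with preemption
 * disabled around the udelay() instead of interrupts.
 */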
#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptable,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning about failed buffer
         * flipping: tracing_stop() disables the tr and max buffers,
         * which makes flipping impossible while a parallel max
         * preempt-off latency is being recorded.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptable,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning about failed buffer
         * flipping: tracing_stop() disables the tr and max buffers,
         * which makes flipping impossible while a parallel max
         * irqs/preempt-off latency is being recorded.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        tracing_start();
 out_no_start:
        trace->reset(tr);
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
        /* Make this a RT thread, doesn't need to be too high */
        static const struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Make it know we have a new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

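/*
 * Create an RT thread, let it block, then wake it and verify that the
 * wakeup latency tracer recorded the event in the max buffer.
 */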
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes this is slightly racy. It is possible that for some
         * strange reason the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that already is awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case is that we disable tracing. Honestly, if this
         * race does happen, something is horribly wrong with the
         * system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for a 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */