linux/tools/perf/builtin-sched.c
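/*
 * builtin-sched.c - 'perf sched' scheduler tool
 *
 * Builds on the sched_* tracepoints to offer several sub-modes:
 * latency (a per-task wakeup latency table), map (a per-CPU context
 * switch map) and replay (re-execute the recorded workload with one
 * thread per traced task), plus a record helper that pre-selects the
 * needed tracepoints.
 */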
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

#define PR_SET_NAME             15               /* Set process name */
#define MAX_CPUS                4096
#define COMM_LEN                20
#define SYM_LEN                 129
#define MAX_PID                 65536

struct sched_atom;

struct task_desc {
        unsigned long           nr;
        unsigned long           pid;
        char                    comm[COMM_LEN];

        unsigned long           nr_events;
        unsigned long           curr_event;
        struct sched_atom       **atoms;

        pthread_t               thread;
        sem_t                   sleep_sem;

        sem_t                   ready_for_work;
        sem_t                   work_done_sem;

        u64                     cpu_usage;
};

enum sched_event_type {
        SCHED_EVENT_RUN,
        SCHED_EVENT_SLEEP,
        SCHED_EVENT_WAKEUP,
        SCHED_EVENT_MIGRATION,
};

struct sched_atom {
        enum sched_event_type   type;
        int                     specific_wait;
        u64                     timestamp;
        u64                     duration;
        unsigned long           nr;
        sem_t                   *wait_sem;
        struct task_desc        *wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
        THREAD_SLEEPING = 0,
        THREAD_WAIT_CPU,
        THREAD_SCHED_IN,
        THREAD_IGNORE
};

struct work_atom {
        struct list_head        list;
        enum thread_state       state;
        u64                     sched_out_time;
        u64                     wake_up_time;
        u64                     sched_in_time;
        u64                     runtime;
};

struct work_atoms {
        struct list_head        work_list;
        struct thread           *thread;
        struct rb_node          node;
        u64                     max_lat;
        u64                     max_lat_at;
        u64                     total_lat;
        u64                     nb_atoms;
        u64                     total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

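/*
 * Each sub-command (latency, map, replay) supplies its own set of
 * tracepoint callbacks through this handler table; the generic
 * process_sched_*_event() dispatchers below route samples to whichever
 * table is installed in perf_sched::tp_handler.
 */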
struct trace_sched_handler {
        int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);

        int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
                             struct perf_sample *sample, struct machine *machine);

        int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);

        int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
                          struct perf_sample *sample);

        int (*migrate_task_event)(struct perf_sched *sched,
                                  struct perf_evsel *evsel,
                                  struct perf_sample *sample,
                                  struct machine *machine);
};

struct perf_sched {
        struct perf_tool tool;
        const char       *input_name;
        const char       *sort_order;
        unsigned long    nr_tasks;
        struct task_desc *pid_to_task[MAX_PID];
        struct task_desc **tasks;
        const struct trace_sched_handler *tp_handler;
        pthread_mutex_t  start_work_mutex;
        pthread_mutex_t  work_done_wait_mutex;
        int              profile_cpu;
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
        int              max_cpu;
        u32              curr_pid[MAX_CPUS];
        struct thread    *curr_thread[MAX_CPUS];
        char             next_shortname1;
        char             next_shortname2;
        unsigned int     replay_repeat;
        unsigned long    nr_run_events;
        unsigned long    nr_sleep_events;
        unsigned long    nr_wakeup_events;
        unsigned long    nr_sleep_corrections;
        unsigned long    nr_run_events_optimized;
        unsigned long    targetless_wakeups;
        unsigned long    multitarget_wakeups;
        unsigned long    nr_runs;
        unsigned long    nr_timestamps;
        unsigned long    nr_unordered_timestamps;
        unsigned long    nr_state_machine_bugs;
        unsigned long    nr_context_switch_bugs;
        unsigned long    nr_events;
        unsigned long    nr_lost_chunks;
        unsigned long    nr_lost_events;
        u64              run_measurement_overhead;
        u64              sleep_measurement_overhead;
        u64              start_time;
        u64              cpu_usage;
        u64              runavg_cpu_usage;
        u64              parent_cpu_usage;
        u64              runavg_parent_cpu_usage;
        u64              sum_runtime;
        u64              sum_fluct;
        u64              run_avg;
        u64              all_runtime;
        u64              all_count;
        u64              cpu_last_switched[MAX_CPUS];
        struct rb_root   atom_root, sorted_atom_root;
        struct list_head sort_list, cmp_pid;
};

static u64 get_nsecs(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);

        return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

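/*
 * Spin until 'nsecs' have elapsed, deducting the previously calibrated
 * overhead of the measurement loop itself so that short bursts are not
 * systematically over-burned.
 */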
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
        u64 T0 = get_nsecs(), T1;

        do {
                T1 = get_nsecs();
        } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
        struct timespec ts;

        ts.tv_nsec = nsecs % 1000000000;
        ts.tv_sec = nsecs / 1000000000;

        nanosleep(&ts, NULL);
}

static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
        u64 T0, T1, delta, min_delta = 1000000000ULL;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                burn_nsecs(sched, 0);
                T1 = get_nsecs();
                delta = T1-T0;
                min_delta = min(min_delta, delta);
        }
        sched->run_measurement_overhead = min_delta;

        printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
        u64 T0, T1, delta, min_delta = 1000000000ULL;
        int i;

        for (i = 0; i < 10; i++) {
                T0 = get_nsecs();
                sleep_nsecs(10000);
                T1 = get_nsecs();
                delta = T1-T0;
                min_delta = min(min_delta, delta);
        }
        min_delta -= 10000;
        sched->sleep_measurement_overhead = min_delta;

        printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

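/*
 * Append a new atom to the task's event array, growing the array by
 * one slot.  The caller fills in the type-specific fields.
 */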
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
        struct sched_atom *event = zalloc(sizeof(*event));
        unsigned long idx = task->nr_events;
        size_t size;

        event->timestamp = timestamp;
        event->nr = idx;

        task->nr_events++;
        size = sizeof(struct sched_atom *) * task->nr_events;
        task->atoms = realloc(task->atoms, size);
        BUG_ON(!task->atoms);

        task->atoms[idx] = event;

        return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
        if (!task->nr_events)
                return NULL;

        return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
                                u64 timestamp, u64 duration)
{
        struct sched_atom *event, *curr_event = last_event(task);

        /*
         * optimize an existing RUN event by merging this one
         * into it:
         */
        if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
                sched->nr_run_events_optimized++;
                curr_event->duration += duration;
                return;
        }

        event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_RUN;
        event->duration = duration;

        sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
                                   u64 timestamp, struct task_desc *wakee)
{
        struct sched_atom *event, *wakee_event;

        event = get_new_event(task, timestamp);
        event->type = SCHED_EVENT_WAKEUP;
        event->wakee = wakee;

        wakee_event = last_event(wakee);
        if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
                sched->targetless_wakeups++;
                return;
        }
        if (wakee_event->wait_sem) {
                sched->multitarget_wakeups++;
                return;
        }

        wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
        sem_init(wakee_event->wait_sem, 0, 0);
        wakee_event->specific_wait = 1;
        event->wait_sem = wakee_event->wait_sem;

        sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
                                  u64 timestamp, u64 task_state __maybe_unused)
{
        struct sched_atom *event = get_new_event(task, timestamp);

        event->type = SCHED_EVENT_SLEEP;

        sched->nr_sleep_events++;
}

static struct task_desc *register_pid(struct perf_sched *sched,
                                      unsigned long pid, const char *comm)
{
        struct task_desc *task;

        BUG_ON(pid >= MAX_PID);

        task = sched->pid_to_task[pid];

        if (task)
                return task;

        task = zalloc(sizeof(*task));
        task->pid = pid;
        task->nr = sched->nr_tasks;
        strcpy(task->comm, comm);
        /*
         * every task starts in sleeping state - this gets ignored
         * if there's no wakeup pointing to this sleep state:
         */
        add_sched_event_sleep(sched, task, 0, 0);

        sched->pid_to_task[pid] = task;
        sched->nr_tasks++;
        sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
        BUG_ON(!sched->tasks);
        sched->tasks[task->nr] = task;

        if (verbose)
                printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

        return task;
}


static void print_task_traces(struct perf_sched *sched)
{
        struct task_desc *task;
        unsigned long i;

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
                        task->nr, task->comm, task->pid, task->nr_events);
        }
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
        struct task_desc *task1, *task2;
        unsigned long i, j;

        for (i = 0; i < sched->nr_tasks; i++) {
                task1 = sched->tasks[i];
                j = i + 1;
                if (j == sched->nr_tasks)
                        j = 0;
                task2 = sched->tasks[j];
                add_sched_event_wakeup(sched, task1, 0, task2);
        }
}

static void perf_sched__process_event(struct perf_sched *sched,
                                      struct sched_atom *atom)
{
        int ret = 0;

        switch (atom->type) {
                case SCHED_EVENT_RUN:
                        burn_nsecs(sched, atom->duration);
                        break;
                case SCHED_EVENT_SLEEP:
                        if (atom->wait_sem)
                                ret = sem_wait(atom->wait_sem);
                        BUG_ON(ret);
                        break;
                case SCHED_EVENT_WAKEUP:
                        if (atom->wait_sem)
                                ret = sem_post(atom->wait_sem);
                        BUG_ON(ret);
                        break;
                case SCHED_EVENT_MIGRATION:
                        break;
                default:
                        BUG_ON(1);
        }
}

static u64 get_cpu_usage_nsec_parent(void)
{
        struct rusage ru;
        u64 sum;
        int err;

        err = getrusage(RUSAGE_SELF, &ru);
        BUG_ON(err);

        sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
        sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

        return sum;
}

static int self_open_counters(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));

        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;

        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

        if (fd < 0)
                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n", fd, strerror(errno));
        return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
        u64 runtime;
        int ret;

        ret = read(fd, &runtime, sizeof(runtime));
        BUG_ON(ret != sizeof(runtime));

        return runtime;
}

struct sched_thread_parms {
        struct task_desc  *task;
        struct perf_sched *sched;
};

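/*
 * Body of each replay worker thread: announce readiness, wait for the
 * parent to release start_work_mutex, replay this task's recorded
 * atoms while measuring its own task-clock usage, then signal
 * completion and block on work_done_wait_mutex until the next round.
 */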
static void *thread_func(void *ctx)
{
        struct sched_thread_parms *parms = ctx;
        struct task_desc *this_task = parms->task;
        struct perf_sched *sched = parms->sched;
        u64 cpu_usage_0, cpu_usage_1;
        unsigned long i, ret;
        char comm2[22];
        int fd;

        free(parms);

        sprintf(comm2, ":%s", this_task->comm);
        prctl(PR_SET_NAME, comm2);
        fd = self_open_counters();
        if (fd < 0)
                return NULL;
again:
        ret = sem_post(&this_task->ready_for_work);
        BUG_ON(ret);
        ret = pthread_mutex_lock(&sched->start_work_mutex);
        BUG_ON(ret);
        ret = pthread_mutex_unlock(&sched->start_work_mutex);
        BUG_ON(ret);

        cpu_usage_0 = get_cpu_usage_nsec_self(fd);

        for (i = 0; i < this_task->nr_events; i++) {
                this_task->curr_event = i;
                perf_sched__process_event(sched, this_task->atoms[i]);
        }

        cpu_usage_1 = get_cpu_usage_nsec_self(fd);
        this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
        ret = sem_post(&this_task->work_done_sem);
        BUG_ON(ret);

        ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
        BUG_ON(ret);
        ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
        BUG_ON(ret);

        goto again;
}

static void create_tasks(struct perf_sched *sched)
{
        struct task_desc *task;
        pthread_attr_t attr;
        unsigned long i;
        int err;

        err = pthread_attr_init(&attr);
        BUG_ON(err);
        err = pthread_attr_setstacksize(&attr,
                        (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
        BUG_ON(err);
        err = pthread_mutex_lock(&sched->start_work_mutex);
        BUG_ON(err);
        err = pthread_mutex_lock(&sched->work_done_wait_mutex);
        BUG_ON(err);
        for (i = 0; i < sched->nr_tasks; i++) {
                struct sched_thread_parms *parms = malloc(sizeof(*parms));
                BUG_ON(parms == NULL);
                parms->task = task = sched->tasks[i];
                parms->sched = sched;
                sem_init(&task->sleep_sem, 0, 0);
                sem_init(&task->ready_for_work, 0, 0);
                sem_init(&task->work_done_sem, 0, 0);
                task->curr_event = 0;
                err = pthread_create(&task->thread, &attr, thread_func, parms);
                BUG_ON(err);
        }
}

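/*
 * One replay iteration as seen from the parent: wait for every worker
 * to report ready, release them via start_work_mutex, collect their
 * work_done semaphores and per-task CPU usage, then re-arm both
 * mutexes for the next round.
 */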
static void wait_for_tasks(struct perf_sched *sched)
{
        u64 cpu_usage_0, cpu_usage_1;
        struct task_desc *task;
        unsigned long i, ret;

        sched->start_time = get_nsecs();
        sched->cpu_usage = 0;
        pthread_mutex_unlock(&sched->work_done_wait_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                ret = sem_wait(&task->ready_for_work);
                BUG_ON(ret);
                sem_init(&task->ready_for_work, 0, 0);
        }
        ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
        BUG_ON(ret);

        cpu_usage_0 = get_cpu_usage_nsec_parent();

        pthread_mutex_unlock(&sched->start_work_mutex);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                ret = sem_wait(&task->work_done_sem);
                BUG_ON(ret);
                sem_init(&task->work_done_sem, 0, 0);
                sched->cpu_usage += task->cpu_usage;
                task->cpu_usage = 0;
        }

        cpu_usage_1 = get_cpu_usage_nsec_parent();
        if (!sched->runavg_cpu_usage)
                sched->runavg_cpu_usage = sched->cpu_usage;
        sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;

        sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
        if (!sched->runavg_parent_cpu_usage)
                sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
        sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
                                         sched->parent_cpu_usage)/10;

        ret = pthread_mutex_lock(&sched->start_work_mutex);
        BUG_ON(ret);

        for (i = 0; i < sched->nr_tasks; i++) {
                task = sched->tasks[i];
                sem_init(&task->sleep_sem, 0, 0);
                task->curr_event = 0;
        }
}

static void run_one_test(struct perf_sched *sched)
{
        u64 T0, T1, delta, avg_delta, fluct;

        T0 = get_nsecs();
        wait_for_tasks(sched);
        T1 = get_nsecs();

        delta = T1 - T0;
        sched->sum_runtime += delta;
        sched->nr_runs++;

        avg_delta = sched->sum_runtime / sched->nr_runs;
        if (delta < avg_delta)
                fluct = avg_delta - delta;
        else
                fluct = delta - avg_delta;
        sched->sum_fluct += fluct;
        if (!sched->run_avg)
                sched->run_avg = delta;
        sched->run_avg = (sched->run_avg * 9 + delta) / 10;

        printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);

        printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);

        printf("cpu: %0.2f / %0.2f",
                (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);

#if 0
        /*
         * rusage statistics done by the parent, these are less
         * accurate than the sched->sum_exec_runtime based statistics:
         */
        printf(" [%0.2f / %0.2f]",
                (double)sched->parent_cpu_usage/1e6,
                (double)sched->runavg_parent_cpu_usage/1e6);
#endif

        printf("\n");

        if (sched->nr_sleep_corrections)
                printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
        sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
        u64 T0, T1;

        T0 = get_nsecs();
        burn_nsecs(sched, 1e6);
        T1 = get_nsecs();

        printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

        T0 = get_nsecs();
        sleep_nsecs(1e6);
        T1 = get_nsecs();

        printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

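/*
 * Replay-mode tracepoint handlers: each recorded event is converted
 * into sched atoms on the involved tasks, which the worker threads
 * later act out.
 */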
static int
replay_wakeup_event(struct perf_sched *sched,
                    struct perf_evsel *evsel, struct perf_sample *sample,
                    struct machine *machine __maybe_unused)
{
        const char *comm = perf_evsel__strval(evsel, sample, "comm");
        const u32 pid    = perf_evsel__intval(evsel, sample, "pid");
        struct task_desc *waker, *wakee;

        if (verbose) {
                printf("sched_wakeup event %p\n", evsel);

                printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
        }

        waker = register_pid(sched, sample->tid, "<unknown>");
        wakee = register_pid(sched, pid, comm);

        add_sched_event_wakeup(sched, waker, sample->time, wakee);
        return 0;
}

static int replay_switch_event(struct perf_sched *sched,
                               struct perf_evsel *evsel,
                               struct perf_sample *sample,
                               struct machine *machine __maybe_unused)
{
        const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
                   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
        const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
        const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
        struct task_desc *prev, __maybe_unused *next;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu;
        s64 delta;

        if (verbose)
                printf("sched_switch event %p\n", evsel);

        if (cpu >= MAX_CPUS || cpu < 0)
                return 0;

        timestamp0 = sched->cpu_last_switched[cpu];
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
                return -1;
        }

        pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
                 prev_comm, prev_pid, next_comm, next_pid, delta);

        prev = register_pid(sched, prev_pid, prev_comm);
        next = register_pid(sched, next_pid, next_comm);

        sched->cpu_last_switched[cpu] = timestamp;

        add_sched_event_run(sched, prev, timestamp, delta);
        add_sched_event_sleep(sched, prev, timestamp, prev_state);

        return 0;
}

static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
                             struct perf_sample *sample)
{
        const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
                   *child_comm  = perf_evsel__strval(evsel, sample, "child_comm");
        const u32 parent_pid  = perf_evsel__intval(evsel, sample, "parent_pid"),
                  child_pid  = perf_evsel__intval(evsel, sample, "child_pid");

        if (verbose) {
                printf("sched_fork event %p\n", evsel);
                printf("... parent: %s/%d\n", parent_comm, parent_pid);
                printf("...  child: %s/%d\n", child_comm, child_pid);
        }

        register_pid(sched, parent_pid, parent_comm);
        register_pid(sched, child_pid, child_comm);
        return 0;
}

struct sort_dimension {
        const char              *name;
        sort_fn_t               cmp;
        struct list_head        list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
        struct sort_dimension *sort;
        int ret = 0;

        BUG_ON(list_empty(list));

        list_for_each_entry(sort, list, list) {
                ret = sort->cmp(l, r);
                if (ret)
                        return ret;
        }

        return ret;
}

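/*
 * The latency state lives in an rbtree of work_atoms, one node per
 * thread, ordered by the cmp_pid sort list so that lookups and inserts
 * agree on the key.
 */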
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
                         struct list_head *sort_list)
{
        struct rb_node *node = root->rb_node;
        struct work_atoms key = { .thread = thread };

        while (node) {
                struct work_atoms *atoms;
                int cmp;

                atoms = container_of(node, struct work_atoms, node);

                cmp = thread_lat_cmp(sort_list, &key, atoms);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else {
                        BUG_ON(thread != atoms->thread);
                        return atoms;
                }
        }
        return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
                         struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        while (*new) {
                struct work_atoms *this;
                int cmp;

                this = container_of(*new, struct work_atoms, node);
                parent = *new;

                cmp = thread_lat_cmp(sort_list, data, this);

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
        struct work_atoms *atoms = zalloc(sizeof(*atoms));
        if (!atoms) {
                pr_err("No memory at %s\n", __func__);
                return -1;
        }

        atoms->thread = thread;
        INIT_LIST_HEAD(&atoms->work_list);
        __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
        return 0;
}

static int latency_fork_event(struct perf_sched *sched __maybe_unused,
                              struct perf_evsel *evsel __maybe_unused,
                              struct perf_sample *sample __maybe_unused)
{
        /* should insert the newcomer */
        return 0;
}

static char sched_out_state(u64 prev_state)
{
        const char *str = TASK_STATE_TO_CHAR_STR;

        return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
                    char run_state,
                    u64 timestamp)
{
        struct work_atom *atom = zalloc(sizeof(*atom));
        if (!atom) {
                pr_err("No memory at %s\n", __func__);
                return -1;
        }

        atom->sched_out_time = timestamp;

        if (run_state == 'R') {
                atom->state = THREAD_WAIT_CPU;
                atom->wake_up_time = atom->sched_out_time;
        }

        list_add_tail(&atom->list, &atoms->work_list);
        return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
                  u64 timestamp __maybe_unused)
{
        struct work_atom *atom;

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        atom->runtime += delta;
        atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
        struct work_atom *atom;
        u64 delta;

        if (list_empty(&atoms->work_list))
                return;

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        if (atom->state != THREAD_WAIT_CPU)
                return;

        if (timestamp < atom->wake_up_time) {
                atom->state = THREAD_IGNORE;
                return;
        }

        atom->state = THREAD_SCHED_IN;
        atom->sched_in_time = timestamp;

        delta = atom->sched_in_time - atom->wake_up_time;
        atoms->total_lat += delta;
        if (delta > atoms->max_lat) {
                atoms->max_lat = delta;
                atoms->max_lat_at = timestamp;
        }
        atoms->nb_atoms++;
}

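/*
 * Latency-mode handlers drive a small per-atom state machine:
 * THREAD_WAIT_CPU is entered on wakeup (or on a 'R' sched-out), and
 * the wakeup-to-sched-in delta is accounted when the thread is
 * switched in.
 */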
static int latency_switch_event(struct perf_sched *sched,
                                struct perf_evsel *evsel,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
        const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
        struct work_atoms *out_events, *in_events;
        struct thread *sched_out, *sched_in;
        u64 timestamp0, timestamp = sample->time;
        int cpu = sample->cpu;
        s64 delta;

        BUG_ON(cpu >= MAX_CPUS || cpu < 0);

        timestamp0 = sched->cpu_last_switched[cpu];
        sched->cpu_last_switched[cpu] = timestamp;
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
                return -1;
        }

        sched_out = machine__findnew_thread(machine, prev_pid);
        sched_in = machine__findnew_thread(machine, next_pid);

        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
                if (thread_atoms_insert(sched, sched_out))
                        return -1;
                out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
                if (!out_events) {
                        pr_err("out-event: Internal tree error");
                        return -1;
                }
        }
        if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
                return -1;

        in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
        if (!in_events) {
                if (thread_atoms_insert(sched, sched_in))
                        return -1;
                in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
                if (!in_events) {
                        pr_err("in-event: Internal tree error");
                        return -1;
                }
                /*
                 * Task came in that we have not heard about yet,
                 * add an initial atom in runnable state:
                 */
                if (add_sched_out_event(in_events, 'R', timestamp))
                        return -1;
        }
        add_sched_in_event(in_events, timestamp);

        return 0;
}

static int latency_runtime_event(struct perf_sched *sched,
                                 struct perf_evsel *evsel,
                                 struct perf_sample *sample,
                                 struct machine *machine)
{
        const u32 pid      = perf_evsel__intval(evsel, sample, "pid");
        const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
        struct thread *thread = machine__findnew_thread(machine, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
        int cpu = sample->cpu;

        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
        if (!atoms) {
                if (thread_atoms_insert(sched, thread))
                        return -1;
                atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("in-event: Internal tree error");
                        return -1;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
                        return -1;
        }

        add_runtime_event(atoms, runtime, timestamp);
        return 0;
}

static int latency_wakeup_event(struct perf_sched *sched,
                                struct perf_evsel *evsel,
                                struct perf_sample *sample,
                                struct machine *machine)
{
        const u32 pid     = perf_evsel__intval(evsel, sample, "pid"),
                  success = perf_evsel__intval(evsel, sample, "success");
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *wakee;
        u64 timestamp = sample->time;

        /* Note for later, it may be interesting to observe the failing cases */
        if (!success)
                return 0;

        wakee = machine__findnew_thread(machine, pid);
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
                        return -1;
                atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("wakeup-event: Internal tree error");
                        return -1;
                }
                if (add_sched_out_event(atoms, 'S', timestamp))
                        return -1;
        }

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);

        /*
         * You WILL be missing events if you've recorded only
         * one CPU, or are only looking at one, so don't
         * make useless noise.
         */
        if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
                sched->nr_state_machine_bugs++;

        sched->nr_timestamps++;
        if (atom->sched_out_time > timestamp) {
                sched->nr_unordered_timestamps++;
                return 0;
        }

        atom->state = THREAD_WAIT_CPU;
        atom->wake_up_time = timestamp;
        return 0;
}

static int latency_migrate_task_event(struct perf_sched *sched,
                                      struct perf_evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        const u32 pid = perf_evsel__intval(evsel, sample, "pid");
        u64 timestamp = sample->time;
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *migrant;

        /*
         * Only need to worry about migration when profiling one CPU.
         */
        if (sched->profile_cpu == -1)
                return 0;

        migrant = machine__findnew_thread(machine, pid);
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
                        return -1;
                register_pid(sched, migrant->pid, migrant->comm);
                atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("migration-event: Internal tree error");
                        return -1;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
                        return -1;
        }

        BUG_ON(list_empty(&atoms->work_list));

        atom = list_entry(atoms->work_list.prev, struct work_atom, list);
        atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

        sched->nr_timestamps++;

        if (atom->sched_out_time > timestamp)
                sched->nr_unordered_timestamps++;

        return 0;
}

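/*
 * Print one row of the latency table: total runtime, switch count,
 * average/maximum wakeup latency and when the maximum occurred.
 */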
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
        int i;
        int ret;
        u64 avg;

        if (!work_list->nb_atoms)
                return;
        /*
         * Ignore idle threads:
         */
        if (!strcmp(work_list->thread->comm, "swapper"))
                return;

        sched->all_runtime += work_list->total_runtime;
        sched->all_count   += work_list->nb_atoms;

        ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

        for (i = 0; i < 24 - ret; i++)
                printf(" ");

        avg = work_list->total_lat / work_list->nb_atoms;

        printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
                 (double)work_list->total_runtime / 1e6,
                 work_list->nb_atoms, (double)avg / 1e6,
                 (double)work_list->max_lat / 1e6,
                 (double)work_list->max_lat_at / 1e9);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->thread->pid < r->thread->pid)
                return -1;
        if (l->thread->pid > r->thread->pid)
                return 1;

        return 0;
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
        u64 avgl, avgr;

        if (!l->nb_atoms)
                return -1;

        if (!r->nb_atoms)
                return 1;

        avgl = l->total_lat / l->nb_atoms;
        avgr = r->total_lat / r->nb_atoms;

        if (avgl < avgr)
                return -1;
        if (avgl > avgr)
                return 1;

        return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->max_lat < r->max_lat)
                return -1;
        if (l->max_lat > r->max_lat)
                return 1;

        return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->nb_atoms < r->nb_atoms)
                return -1;
        if (l->nb_atoms > r->nb_atoms)
                return 1;

        return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
        if (l->total_runtime < r->total_runtime)
                return -1;
        if (l->total_runtime > r->total_runtime)
                return 1;

        return 0;
}

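/*
 * Map a --sort token ("pid", "avg", "max", "switch", "runtime") to its
 * comparison function and queue it on the given sort list.
 */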
static int sort_dimension__add(const char *tok, struct list_head *list)
{
        size_t i;
        static struct sort_dimension avg_sort_dimension = {
                .name = "avg",
                .cmp  = avg_cmp,
        };
        static struct sort_dimension max_sort_dimension = {
                .name = "max",
                .cmp  = max_cmp,
        };
        static struct sort_dimension pid_sort_dimension = {
                .name = "pid",
                .cmp  = pid_cmp,
        };
        static struct sort_dimension runtime_sort_dimension = {
                .name = "runtime",
                .cmp  = runtime_cmp,
        };
        static struct sort_dimension switch_sort_dimension = {
                .name = "switch",
                .cmp  = switch_cmp,
        };
        struct sort_dimension *available_sorts[] = {
                &pid_sort_dimension,
                &avg_sort_dimension,
                &max_sort_dimension,
                &switch_sort_dimension,
                &runtime_sort_dimension,
        };

        for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
                if (!strcmp(available_sorts[i]->name, tok)) {
                        list_add_tail(&available_sorts[i]->list, list);

                        return 0;
                }
        }

        return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
        struct rb_node *node;

        for (;;) {
                struct work_atoms *data;
                node = rb_first(&sched->atom_root);
                if (!node)
                        break;

                rb_erase(node, &sched->atom_root);
                data = rb_entry(node, struct work_atoms, node);
                __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
        }
}

static int process_sched_wakeup_event(struct perf_tool *tool,
                                      struct perf_evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->wakeup_event)
                return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

        return 0;
}

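/*
 * Map mode: assign each task a two-character shortname the first time
 * it is seen and print one column per CPU, marking the CPU where this
 * switch happened with '*'.
 */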
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                            struct perf_sample *sample, struct machine *machine)
{
        const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
                  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
        struct thread *sched_out __maybe_unused, *sched_in;
        int new_shortname;
        u64 timestamp0, timestamp = sample->time;
        s64 delta;
        int cpu, this_cpu = sample->cpu;

        BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

        if (this_cpu > sched->max_cpu)
                sched->max_cpu = this_cpu;

        timestamp0 = sched->cpu_last_switched[this_cpu];
        sched->cpu_last_switched[this_cpu] = timestamp;
        if (timestamp0)
                delta = timestamp - timestamp0;
        else
                delta = 0;

        if (delta < 0) {
                pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
                return -1;
        }

        sched_out = machine__findnew_thread(machine, prev_pid);
        sched_in = machine__findnew_thread(machine, next_pid);

        sched->curr_thread[this_cpu] = sched_in;

        printf("  ");

        new_shortname = 0;
        if (!sched_in->shortname[0]) {
                sched_in->shortname[0] = sched->next_shortname1;
                sched_in->shortname[1] = sched->next_shortname2;

                if (sched->next_shortname1 < 'Z') {
                        sched->next_shortname1++;
                } else {
                        sched->next_shortname1 = 'A';
                        if (sched->next_shortname2 < '9') {
                                sched->next_shortname2++;
                        } else {
                                sched->next_shortname2 = '0';
                        }
                }
                new_shortname = 1;
        }

        for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
                if (cpu != this_cpu)
                        printf(" ");
                else
                        printf("*");

                if (sched->curr_thread[cpu]) {
                        if (sched->curr_thread[cpu]->pid)
                                printf("%2s ", sched->curr_thread[cpu]->shortname);
                        else
                                printf(".  ");
                } else
                        printf("   ");
        }

        printf("  %12.6f secs ", (double)timestamp/1e9);
        if (new_shortname) {
                printf("%s => %s:%d\n",
                        sched_in->shortname, sched_in->comm, sched_in->pid);
        } else {
                printf("\n");
        }

        return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
                                      struct perf_evsel *evsel,
                                      struct perf_sample *sample,
                                      struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
        int this_cpu = sample->cpu, err = 0;
        u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
            next_pid = perf_evsel__intval(evsel, sample, "next_pid");

        if (sched->curr_pid[this_cpu] != (u32)-1) {
                /*
                 * Are we trying to switch away a PID that is
                 * not current?
                 */
                if (sched->curr_pid[this_cpu] != prev_pid)
                        sched->nr_context_switch_bugs++;
        }

        if (sched->tp_handler->switch_event)
                err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

        sched->curr_pid[this_cpu] = next_pid;
        return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
                                       struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->runtime_event)
                return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

        return 0;
}

static int process_sched_fork_event(struct perf_tool *tool,
                                    struct perf_evsel *evsel,
                                    struct perf_sample *sample,
                                    struct machine *machine __maybe_unused)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->fork_event)
                return sched->tp_handler->fork_event(sched, evsel, sample);

        return 0;
}

static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
                                    struct perf_evsel *evsel,
                                    struct perf_sample *sample __maybe_unused,
                                    struct machine *machine __maybe_unused)
{
        pr_debug("sched_exit event %p\n", evsel);
        return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
                                            struct perf_evsel *evsel,
                                            struct perf_sample *sample,
                                            struct machine *machine)
{
        struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

        if (sched->tp_handler->migrate_task_event)
                return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

        return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
                                  struct perf_evsel *evsel,
                                  struct perf_sample *sample,
                                  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct perf_sample *sample,
                                                 struct perf_evsel *evsel,
                                                 struct machine *machine)
{
        struct thread *thread = machine__findnew_thread(machine, sample->tid);
        int err = 0;

        if (thread == NULL) {
                pr_debug("problem processing %s event, skipping it.\n",
                         perf_evsel__name(evsel));
                return -1;
        }

        evsel->hists.stats.total_period += sample->period;
        hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);

        if (evsel->handler.func != NULL) {
                tracepoint_handler f = evsel->handler.func;
                err = f(tool, evsel, sample, machine);
        }

        return err;
}

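/*
 * Open the recorded perf.data, wire each sched tracepoint to its
 * handler, and run the session; lost-event statistics are stashed in
 * *sched for later reporting by print_bad_events().
 */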
static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
                                   struct perf_session **psession)
{
        const struct perf_evsel_str_handler handlers[] = {
                { "sched:sched_switch",       process_sched_switch_event, },
                { "sched:sched_stat_runtime", process_sched_runtime_event, },
                { "sched:sched_wakeup",       process_sched_wakeup_event, },
                { "sched:sched_wakeup_new",   process_sched_wakeup_event, },
                { "sched:sched_process_fork", process_sched_fork_event, },
                { "sched:sched_process_exit", process_sched_exit_event, },
                { "sched:sched_migrate_task", process_sched_migrate_task_event, },
        };
        struct perf_session *session;

        session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool);
        if (session == NULL) {
                pr_debug("No memory for session\n");
1466                return -1;
1467        }
1468
1469        if (perf_session__set_tracepoints_handlers(session, handlers))
1470                goto out_delete;
1471
1472        if (perf_session__has_traces(session, "record -R")) {
1473                int err = perf_session__process_events(session, &sched->tool);
1474                if (err) {
1475                        pr_err("Failed to process events, error %d", err);
1476                        goto out_delete;
1477                }
1478
1479                sched->nr_events      = session->hists.stats.nr_events[0];
1480                sched->nr_lost_events = session->hists.stats.total_lost;
1481                sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
1482        }
1483
1484        if (destroy)
1485                perf_session__delete(session);
1486
1487        if (psession)
1488                *psession = session;
1489
1490        return 0;
1491
1492out_delete:
1493        perf_session__delete(session);
1494        return -1;
1495}
1496
1497static void print_bad_events(struct perf_sched *sched)
1498{
1499        if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
1500                printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1501                        (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
1502                        sched->nr_unordered_timestamps, sched->nr_timestamps);
1503        }
1504        if (sched->nr_lost_events && sched->nr_events) {
1505                printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1506                        (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
1507                        sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
1508        }
1509        if (sched->nr_state_machine_bugs && sched->nr_timestamps) {
1510                printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
1511                        (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0,
1512                        sched->nr_state_machine_bugs, sched->nr_timestamps);
1513                if (sched->nr_lost_events)
1514                        printf(" (due to lost events?)");
1515                printf("\n");
1516        }
1517        if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
1518                printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
1519                        (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
1520                        sched->nr_context_switch_bugs, sched->nr_timestamps);
1521                if (sched->nr_lost_events)
1522                        printf(" (due to lost events?)");
1523                printf("\n");
1524        }
1525}
1526
static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;
	struct perf_session *session;

	setup_pager();
	if (perf_sched__read_events(sched, false, &session))
		return -1;
	perf_sched__sort_lat(sched);

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / 1e6, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	perf_session__delete(session);
	return 0;
}

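/*
 * 'perf sched map': print a per-CPU view of context switching over
 * time, one column per CPU; the actual output is produced by
 * map_switch_event(), this just drives event processing.
 */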
static int perf_sched__map(struct perf_sched *sched)
{
	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	if (perf_sched__read_events(sched, true, NULL))
		return -1;
	print_bad_events(sched);
	return 0;
}

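/*
 * 'perf sched replay': calibrate the run/sleep measurement overhead
 * of this machine, rebuild the recorded tasks as real threads
 * (create_tasks()) and re-execute the recorded scheduling pattern
 * sched->replay_repeat times.
 */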
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched, true, NULL))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}

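/*
 * Split the comma/space separated --sort key list (e.g. "avg,max")
 * and add each dimension to sched->sort_list. The pid dimension is
 * always added to sched->cmp_pid, the key list used when inserting
 * work atoms into the per-thread tree.
 */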
static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	/* don't hand a NULL string to strtok_r() if strdup() failed */
	BUG_ON(str == NULL);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(usage_msg, options);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}

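/*
 * 'perf sched record': hand off to 'perf record' with a canned set of
 * scheduler tracepoints prepended to the user's arguments. E.g.
 * 'perf sched record -- sleep 1' runs (roughly, modulo quoting):
 *
 *   perf record -a -R -f -m 1024 -c 1 \
 *       -e sched:sched_switch -e sched:sched_stat_wait \
 *       -e sched:sched_stat_sleep -e sched:sched_stat_iowait \
 *       -e sched:sched_stat_runtime -e sched:sched_process_exit \
 *       -e sched:sched_process_fork -e sched:sched_wakeup \
 *       -e sched:sched_migrate_task -- sleep 1
 */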
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-f",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_exit",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

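/*
 * Entry point for 'perf sched'. Illustrative invocations, using only
 * the options defined below:
 *
 *   perf sched record -- sleep 1    # record sched events, system-wide
 *   perf sched latency --sort max   # per-task wakeup latency table
 *   perf sched map                  # per-CPU context switch map
 *   perf sched replay -r 10         # replay the recorded workload
 *
 * 'perf sched script' is aliased to 'perf script'.
 */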
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.fork		 = perf_event__process_task,
			.ordered_samples = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.curr_pid	      = { [0 ... MAX_CPUS - 1] = -1 },
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &sched.input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const sched_usage[] = {
		"perf sched [<options>] {record|latency|map|replay|script}",
		NULL
	};
	struct trace_sched_handler lat_ops  = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.fork_event	    = latency_fork_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops  = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops  = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}