linux/tools/perf/builtin-lock.c
   1#include "builtin.h"
   2#include "perf.h"
   3
   4#include "util/util.h"
   5#include "util/cache.h"
   6#include "util/symbol.h"
   7#include "util/thread.h"
   8#include "util/header.h"
   9
  10#include "util/parse-options.h"
  11#include "util/trace-event.h"
  12
  13#include "util/debug.h"
  14#include "util/session.h"
  15
  16#include <sys/types.h>
  17#include <sys/prctl.h>
  18#include <semaphore.h>
  19#include <pthread.h>
  20#include <math.h>
  21#include <limits.h>
  22
  23#include <linux/list.h>
  24#include <linux/hash.h>
  25
static struct perf_session *session;

/* based on kernel/lockdep.c */
#define LOCKHASH_BITS           12
#define LOCKHASH_SIZE           (1UL << LOCKHASH_BITS)

static struct list_head lockhash_table[LOCKHASH_SIZE];

#define __lockhashfn(key)       hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key)      (lockhash_table + __lockhashfn((key)))

struct lock_stat {
        struct list_head        hash_entry;
        struct rb_node          rb;             /* used for sorting */

        /*
         * FIXME: raw_field_value() returns unsigned long long,
         * so the address of a lockdep_map has to be handled as 64 bit.
         * Is there a better solution?
         */
        void                    *addr;          /* address of lockdep_map, used as ID */
        char                    *name;          /* for strcpy(), we cannot use const */

        unsigned int            nr_acquire;
        unsigned int            nr_acquired;
        unsigned int            nr_contended;
        unsigned int            nr_release;

        unsigned int            nr_readlock;
        unsigned int            nr_trylock;
        /* these times are in nanoseconds */
        u64                     wait_time_total;
        u64                     wait_time_min;
        u64                     wait_time_max;

        int                     discard; /* flag for blacklisted (broken) locks */
};

/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first event on a lock.
 * By the nature of lock events, there is no guarantee that the first
 * event seen for a lock is acquire; it can be acquired, contended or
 * release.
 */
#define SEQ_STATE_UNINITIALIZED 0              /* initial state */
#define SEQ_STATE_RELEASED      1
#define SEQ_STATE_ACQUIRING     2
#define SEQ_STATE_ACQUIRED      3
#define SEQ_STATE_READ_ACQUIRED 4
#define SEQ_STATE_CONTENDED     5

/*
 * MAX_LOCK_DEPTH
 * Imported from include/linux/sched.h.
 * Should this be kept in sync with the kernel header?
 */
#define MAX_LOCK_DEPTH 48

/*
 * struct lock_seq_stat:
 * Holds the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
struct lock_seq_stat {
        struct list_head        list;
        int                     state;
        u64                     prev_event_time;
        void                    *addr;

        int                     read_count;
};

struct thread_stat {
        struct rb_node          rb;

        u32                     tid;
        struct list_head        seq_list;
};

static struct rb_root           thread_stats;

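/* look up the per-thread stat for @tid in the thread_stats rbtree */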
static struct thread_stat *thread_stat_find(u32 tid)
{
        struct rb_node *node;
        struct thread_stat *st;

        node = thread_stats.rb_node;
        while (node) {
                st = container_of(node, struct thread_stat, rb);
                if (st->tid == tid)
                        return st;
                else if (tid < st->tid)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }

        return NULL;
}

static void thread_stat_insert(struct thread_stat *new)
{
        struct rb_node **rb = &thread_stats.rb_node;
        struct rb_node *parent = NULL;
        struct thread_stat *p;

        while (*rb) {
                p = container_of(*rb, struct thread_stat, rb);
                parent = *rb;

                if (new->tid < p->tid)
                        rb = &(*rb)->rb_left;
                else if (new->tid > p->tid)
                        rb = &(*rb)->rb_right;
                else
                        BUG_ON("inserting invalid thread_stat\n");
        }

        rb_link_node(&new->rb, parent, rb);
        rb_insert_color(&new->rb, &thread_stats);
}

static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
{
        struct thread_stat *st;

        st = thread_stat_find(tid);
        if (st)
                return st;

        st = zalloc(sizeof(struct thread_stat));
        if (!st)
                die("memory allocation failed\n");

        st->tid = tid;
        INIT_LIST_HEAD(&st->seq_list);

        thread_stat_insert(st);

        return st;
}

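/*
 * thread_stat_findnew starts out pointing at the "first" variant below,
 * which installs the initial node as the rbtree root and then switches
 * the pointer to the regular find-or-create variant above.
 */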
static struct thread_stat *thread_stat_findnew_first(u32 tid);
static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
        thread_stat_findnew_first;

static struct thread_stat *thread_stat_findnew_first(u32 tid)
{
        struct thread_stat *st;

        st = zalloc(sizeof(struct thread_stat));
        if (!st)
                die("memory allocation failed\n");
        st->tid = tid;
        INIT_LIST_HEAD(&st->seq_list);

        rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
        rb_insert_color(&st->rb, &thread_stats);

        thread_stat_findnew = thread_stat_findnew_after_first;
        return st;
}

/* build a simple key function that returns whether 'one' is bigger than 'two' */
#define SINGLE_KEY(member)                                              \
        static int lock_stat_key_ ## member(struct lock_stat *one,      \
                                         struct lock_stat *two)         \
        {                                                               \
                return one->member > two->member;                       \
        }

SINGLE_KEY(nr_acquired)
SINGLE_KEY(nr_contended)
SINGLE_KEY(wait_time_total)
SINGLE_KEY(wait_time_max)

static int lock_stat_key_wait_time_min(struct lock_stat *one,
                                        struct lock_stat *two)
{
        u64 s1 = one->wait_time_min;
        u64 s2 = two->wait_time_min;
        if (s1 == ULLONG_MAX)
                s1 = 0;
        if (s2 == ULLONG_MAX)
                s2 = 0;
        return s1 > s2;
}

struct lock_key {
        /*
         * name: the key name specified by the user;
         * this should be simpler than the raw member name,
         * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
         */
        const char              *name;
        int                     (*key)(struct lock_stat*, struct lock_stat*);
};

static const char               *sort_key = "acquired";

static int                      (*compare)(struct lock_stat *, struct lock_stat *);

static struct rb_root           result; /* place to store sorted data */

#define DEF_KEY_LOCK(name, fn_suffix)   \
        { #name, lock_stat_key_ ## fn_suffix }
struct lock_key keys[] = {
        DEF_KEY_LOCK(acquired, nr_acquired),
        DEF_KEY_LOCK(contended, nr_contended),
        DEF_KEY_LOCK(wait_total, wait_time_total),
        DEF_KEY_LOCK(wait_min, wait_time_min),
        DEF_KEY_LOCK(wait_max, wait_time_max),

        /* more complicated comparisons should go here */

        { NULL, NULL }
};

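/* resolve sort_key into the matching compare function from keys[] */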
static void select_key(void)
{
        int i;

        for (i = 0; keys[i].name; i++) {
                if (!strcmp(keys[i].name, sort_key)) {
                        compare = keys[i].key;
                        return;
                }
        }

        die("Unknown compare key:%s\n", sort_key);
}

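/*
 * insert st into the result rbtree ordered by the given comparison, so
 * that the "biggest" entries end up leftmost and are popped first
 */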
static void insert_to_result(struct lock_stat *st,
                             int (*bigger)(struct lock_stat *, struct lock_stat *))
{
        struct rb_node **rb = &result.rb_node;
        struct rb_node *parent = NULL;
        struct lock_stat *p;

        while (*rb) {
                p = container_of(*rb, struct lock_stat, rb);
                parent = *rb;

                if (bigger(st, p))
                        rb = &(*rb)->rb_left;
                else
                        rb = &(*rb)->rb_right;
        }

        rb_link_node(&st->rb, parent, rb);
        rb_insert_color(&st->rb, &result);
}

/* returns the leftmost element of result, and erases it */
static struct lock_stat *pop_from_result(void)
{
        struct rb_node *node = result.rb_node;

        if (!node)
                return NULL;

        while (node->rb_left)
                node = node->rb_left;

        rb_erase(node, &result);
        return container_of(node, struct lock_stat, rb);
}

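/*
 * look up the lock_stat for a lockdep_map address in lockhash_table,
 * creating and hashing a new entry on first sight
 */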
static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
{
        struct list_head *entry = lockhashentry(addr);
        struct lock_stat *ret, *new;

        list_for_each_entry(ret, entry, hash_entry) {
                if (ret->addr == addr)
                        return ret;
        }

        new = zalloc(sizeof(struct lock_stat));
        if (!new)
                goto alloc_failed;

        new->addr = addr;
        new->name = zalloc(strlen(name) + 1);
        if (!new->name)
                goto alloc_failed;
        strcpy(new->name, name);

        new->wait_time_min = ULLONG_MAX;

        list_add(&new->hash_entry, entry);
        return new;

alloc_failed:
        die("memory allocation failed\n");
}

static char                     const *input_name = "perf.data";

struct raw_event_sample {
        u32                     size;
        char                    data[0];
};

struct trace_acquire_event {
        void                    *addr;
        const char              *name;
        int                     flag;
};

struct trace_acquired_event {
        void                    *addr;
        const char              *name;
};

struct trace_contended_event {
        void                    *addr;
        const char              *name;
};

struct trace_release_event {
        void                    *addr;
        const char              *name;
};

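/*
 * per-event callbacks; only lock-oriented handlers are implemented so far,
 * but the indirection leaves room for CPU- or thread-oriented ones
 */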
struct trace_lock_handler {
        void (*acquire_event)(struct trace_acquire_event *,
                              struct event *,
                              int cpu,
                              u64 timestamp,
                              struct thread *thread);

        void (*acquired_event)(struct trace_acquired_event *,
                               struct event *,
                               int cpu,
                               u64 timestamp,
                               struct thread *thread);

        void (*contended_event)(struct trace_contended_event *,
                                struct event *,
                                int cpu,
                                u64 timestamp,
                                struct thread *thread);

        void (*release_event)(struct trace_release_event *,
                              struct event *,
                              int cpu,
                              u64 timestamp,
                              struct thread *thread);
};

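/*
 * find the lock_seq_stat for @addr in this thread's seq_list,
 * allocating a fresh UNINITIALIZED sequence if none exists yet
 */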
static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
{
        struct lock_seq_stat *seq;

        list_for_each_entry(seq, &ts->seq_list, list) {
                if (seq->addr == addr)
                        return seq;
        }

        seq = zalloc(sizeof(struct lock_seq_stat));
        if (!seq)
                die("Not enough memory\n");
        seq->state = SEQ_STATE_UNINITIALIZED;
        seq->addr = addr;

        list_add(&seq->list, &ts->seq_list);
        return seq;
}

enum broken_state {
        BROKEN_ACQUIRE,
        BROKEN_ACQUIRED,
        BROKEN_CONTENDED,
        BROKEN_RELEASE,
        BROKEN_MAX,
};

static int bad_hist[BROKEN_MAX];

enum acquire_flags {
        TRY_LOCK = 1,
        READ_LOCK = 2,
};

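/*
 * lock_acquire: start (or restart) a sequence; read/try acquisitions go
 * straight to READ_ACQUIRED, otherwise the lock enters ACQUIRING
 */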
static void
report_lock_acquire_event(struct trace_acquire_event *acquire_event,
                        struct event *__event __used,
                        int cpu __used,
                        u64 timestamp __used,
                        struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, acquire_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
        case SEQ_STATE_RELEASED:
                if (!acquire_event->flag) {
                        seq->state = SEQ_STATE_ACQUIRING;
                } else {
                        if (acquire_event->flag & TRY_LOCK)
                                ls->nr_trylock++;
                        if (acquire_event->flag & READ_LOCK)
                                ls->nr_readlock++;
                        seq->state = SEQ_STATE_READ_ACQUIRED;
                        seq->read_count = 1;
                        ls->nr_acquired++;
                }
                break;
        case SEQ_STATE_READ_ACQUIRED:
                if (acquire_event->flag & READ_LOCK) {
                        seq->read_count++;
                        ls->nr_acquired++;
                        goto end;
                } else {
                        goto broken;
                }
                break;
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_ACQUIRING:
        case SEQ_STATE_CONTENDED:
broken:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_ACQUIRE]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        ls->nr_acquire++;
        seq->prev_event_time = timestamp;
end:
        return;
}

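/*
 * lock_acquired: the lock was finally taken; if the sequence went through
 * CONTENDED, the time since the previous event is charged as wait time
 */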
static void
report_lock_acquired_event(struct trace_acquired_event *acquired_event,
                         struct event *__event __used,
                         int cpu __used,
                         u64 timestamp __used,
                         struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;
        u64 contended_term;

        ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, acquired_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                /* orphan event, do nothing */
                return;
        case SEQ_STATE_ACQUIRING:
                break;
        case SEQ_STATE_CONTENDED:
                contended_term = timestamp - seq->prev_event_time;
                ls->wait_time_total += contended_term;
                if (contended_term < ls->wait_time_min)
                        ls->wait_time_min = contended_term;
                if (ls->wait_time_max < contended_term)
                        ls->wait_time_max = contended_term;
                break;
        case SEQ_STATE_RELEASED:
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_READ_ACQUIRED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_ACQUIRED]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;

        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        seq->state = SEQ_STATE_ACQUIRED;
        ls->nr_acquired++;
        seq->prev_event_time = timestamp;
end:
        return;
}

static void
report_lock_contended_event(struct trace_contended_event *contended_event,
                          struct event *__event __used,
                          int cpu __used,
                          u64 timestamp __used,
                          struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(contended_event->addr, contended_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, contended_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                /* orphan event, do nothing */
                return;
        case SEQ_STATE_ACQUIRING:
                break;
        case SEQ_STATE_RELEASED:
        case SEQ_STATE_ACQUIRED:
        case SEQ_STATE_READ_ACQUIRED:
        case SEQ_STATE_CONTENDED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_CONTENDED]++;
                list_del(&seq->list);
                free(seq);
                goto end;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        seq->state = SEQ_STATE_CONTENDED;
        ls->nr_contended++;
        seq->prev_event_time = timestamp;
end:
        return;
}

static void
report_lock_release_event(struct trace_release_event *release_event,
                        struct event *__event __used,
                        int cpu __used,
                        u64 timestamp __used,
                        struct thread *thread __used)
{
        struct lock_stat *ls;
        struct thread_stat *ts;
        struct lock_seq_stat *seq;

        ls = lock_stat_findnew(release_event->addr, release_event->name);
        if (ls->discard)
                return;

        ts = thread_stat_findnew(thread->pid);
        seq = get_seq(ts, release_event->addr);

        switch (seq->state) {
        case SEQ_STATE_UNINITIALIZED:
                goto end;
                break;
        case SEQ_STATE_ACQUIRED:
                break;
        case SEQ_STATE_READ_ACQUIRED:
                seq->read_count--;
                BUG_ON(seq->read_count < 0);
                if (!seq->read_count) {
                        ls->nr_release++;
                        goto end;
                }
                break;
        case SEQ_STATE_ACQUIRING:
        case SEQ_STATE_CONTENDED:
        case SEQ_STATE_RELEASED:
                /* broken lock sequence, discard it */
                ls->discard = 1;
                bad_hist[BROKEN_RELEASE]++;
                goto free_seq;
                break;
        default:
                BUG_ON("Unknown state of lock sequence found!\n");
                break;
        }

        ls->nr_release++;
free_seq:
        list_del(&seq->list);
        free(seq);
end:
        return;
}

/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops  = {
        .acquire_event          = report_lock_acquire_event,
        .acquired_event         = report_lock_acquired_event,
        .contended_event        = report_lock_contended_event,
        .release_event          = report_lock_release_event,
};

static struct trace_lock_handler *trace_handler;

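/*
 * the process_lock_* helpers below decode the raw tracepoint payload
 * (lockdep_addr, name, flag) and forward it to the active trace_handler
 */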
static void
process_lock_acquire_event(void *data,
                           struct event *event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_acquire_event acquire_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&acquire_event.addr, &tmp, sizeof(void *));
        acquire_event.name = (char *)raw_field_ptr(event, "name", data);
        acquire_event.flag = (int)raw_field_value(event, "flag", data);

        if (trace_handler->acquire_event)
                trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
}

static void
process_lock_acquired_event(void *data,
                            struct event *event __used,
                            int cpu __used,
                            u64 timestamp __used,
                            struct thread *thread __used)
{
        struct trace_acquired_event acquired_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&acquired_event.addr, &tmp, sizeof(void *));
        acquired_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->acquired_event)
                trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
}

static void
process_lock_contended_event(void *data,
                             struct event *event __used,
                             int cpu __used,
                             u64 timestamp __used,
                             struct thread *thread __used)
{
        struct trace_contended_event contended_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&contended_event.addr, &tmp, sizeof(void *));
        contended_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->contended_event)
                trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
}

static void
process_lock_release_event(void *data,
                           struct event *event __used,
                           int cpu __used,
                           u64 timestamp __used,
                           struct thread *thread __used)
{
        struct trace_release_event release_event;
        u64 tmp;                /* this is required for casting... */

        tmp = raw_field_value(event, "lockdep_addr", data);
        memcpy(&release_event.addr, &tmp, sizeof(void *));
        release_event.name = (char *)raw_field_ptr(event, "name", data);

        if (trace_handler->release_event)
                trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
}

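/* dispatch a raw sample to the matching lock_* tracepoint handler by event name */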
static void
process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
{
        struct event *event;
        int type;

        type = trace_parse_common_type(data);
        event = trace_find_event(type);

        if (!strcmp(event->name, "lock_acquire"))
                process_lock_acquire_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_acquired"))
                process_lock_acquired_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_contended"))
                process_lock_contended_event(data, event, cpu, timestamp, thread);
        if (!strcmp(event->name, "lock_release"))
                process_lock_release_event(data, event, cpu, timestamp, thread);
}

static void print_bad_events(int bad, int total)
{
        /* output for debugging, this has to be removed eventually */
        int i;
        const char *name[4] =
                { "acquire", "acquired", "contended", "release" };

        pr_info("\n=== output for debug ===\n\n");
        pr_info("bad: %d, total: %d\n", bad, total);
        pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
        pr_info("histogram of events that caused broken sequences\n");
        for (i = 0; i < BROKEN_MAX; i++)
                pr_info(" %10s: %d\n", name[i], bad_hist[i]);
}

/* TODO: various ways to print, coloring, nanoseconds or milliseconds */
static void print_result(void)
{
        struct lock_stat *st;
        char cut_name[20];
        int bad, total;

        pr_info("%20s ", "Name");
        pr_info("%10s ", "acquired");
        pr_info("%10s ", "contended");

        pr_info("%15s ", "total wait (ns)");
        pr_info("%15s ", "max wait (ns)");
        pr_info("%15s ", "min wait (ns)");

        pr_info("\n\n");

        bad = total = 0;
        while ((st = pop_from_result())) {
                total++;
                if (st->discard) {
                        bad++;
                        continue;
                }
                bzero(cut_name, 20);

                if (strlen(st->name) < 16) {
                        /* output raw name */
                        pr_info("%20s ", st->name);
                } else {
                        strncpy(cut_name, st->name, 16);
                        cut_name[16] = '.';
                        cut_name[17] = '.';
                        cut_name[18] = '.';
                        cut_name[19] = '\0';
                        /* cut off the name to keep the output aligned */
                        pr_info("%20s ", cut_name);
                }

                pr_info("%10u ", st->nr_acquired);
                pr_info("%10u ", st->nr_contended);

                pr_info("%15" PRIu64 " ", st->wait_time_total);
                pr_info("%15" PRIu64 " ", st->wait_time_max);
                pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
                       0 : st->wait_time_min);
                pr_info("\n");
        }

        print_bad_events(bad, total);
}

static bool info_threads, info_map;

static void dump_threads(void)
{
        struct thread_stat *st;
        struct rb_node *node;
        struct thread *t;

        pr_info("%10s: comm\n", "Thread ID");

        node = rb_first(&thread_stats);
        while (node) {
                st = container_of(node, struct thread_stat, rb);
                t = perf_session__findnew(session, st->tid);
                pr_info("%10d: %s\n", st->tid, t->comm);
                node = rb_next(node);
        }
}

static void dump_map(void)
{
        unsigned int i;
        struct lock_stat *st;

        pr_info("Address of instance: name of class\n");
        for (i = 0; i < LOCKHASH_SIZE; i++) {
                list_for_each_entry(st, &lockhash_table[i], hash_entry) {
                        pr_info(" %p: %s\n", st->addr, st->name);
                }
        }
}

static void dump_info(void)
{
        if (info_threads)
                dump_threads();
        else if (info_map)
                dump_map();
        else
                die("Unknown type of information\n");
}

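/* perf session callback: resolve the thread and feed the raw tracepoint data */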
static int process_sample_event(union perf_event *event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel __used,
                                struct perf_session *s)
{
        struct thread *thread = perf_session__findnew(s, sample->tid);

        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
        }

        process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);

        return 0;
}

static struct perf_event_ops eops = {
        .sample                 = process_sample_event,
        .comm                   = perf_event__process_comm,
        .ordered_samples        = true,
};

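/* open the input file and run all samples through the eops callbacks */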
static int read_events(void)
{
        session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
        if (!session)
                die("Initializing perf session failed\n");

        return perf_session__process_events(session, &eops);
}

static void sort_result(void)
{
        unsigned int i;
        struct lock_stat *st;

        for (i = 0; i < LOCKHASH_SIZE; i++) {
                list_for_each_entry(st, &lockhash_table[i], hash_entry) {
                        insert_to_result(st, compare);
                }
        }
}

static void __cmd_report(void)
{
        setup_pager();
        select_key();
        read_events();
        sort_result();
        print_result();
}

static const char * const report_usage[] = {
        "perf lock report [<options>]",
        NULL
};

static const struct option report_options[] = {
        OPT_STRING('k', "key", &sort_key, "acquired",
                    "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
        /* TODO: type */
        OPT_END()
};

static const char * const info_usage[] = {
        "perf lock info [<options>]",
        NULL
};

static const struct option info_options[] = {
        OPT_BOOLEAN('t', "threads", &info_threads,
                    "dump the thread list in perf.data"),
        OPT_BOOLEAN('m', "map", &info_map,
                    "dump the map of lock instances (address: name table)"),
        OPT_END()
};

static const char * const lock_usage[] = {
        "perf lock [<options>] {record|report|script|info}",
        NULL
};

static const struct option lock_options[] = {
        OPT_STRING('i', "input", &input_name, "file", "input file name"),
        OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
        OPT_END()
};

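/*
 * fixed arguments passed down to 'perf record': raw samples (-R), a large
 * mmap buffer, a period of 1 (every event), and the four lock tracepoints
 */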
static const char *record_args[] = {
        "record",
        "-R",
        "-f",
        "-m", "1024",
        "-c", "1",
        "-e", "lock:lock_acquire:r",
        "-e", "lock:lock_acquired:r",
        "-e", "lock:lock_contended:r",
        "-e", "lock:lock_release:r",
};

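/* build an argv of record_args plus the user's extra options and run cmd_record() */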
static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        BUG_ON(i != rec_argc);

        return cmd_record(i, rec_argv, NULL);
}

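/*
 * entry point: initialize the lock hash table, then dispatch to the
 * record/report/script/info subcommands
 */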
int cmd_lock(int argc, const char **argv, const char *prefix __used)
{
        unsigned int i;

        symbol__init();
        for (i = 0; i < LOCKHASH_SIZE; i++)
                INIT_LIST_HEAD(lockhash_table + i);

        argc = parse_options(argc, argv, lock_options, lock_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(lock_usage, lock_options);

        if (!strncmp(argv[0], "rec", 3)) {
                return __cmd_record(argc, argv);
        } else if (!strncmp(argv[0], "report", 6)) {
                trace_handler = &report_lock_ops;
                if (argc) {
                        argc = parse_options(argc, argv,
                                             report_options, report_usage, 0);
                        if (argc)
                                usage_with_options(report_usage, report_options);
                }
                __cmd_report();
        } else if (!strcmp(argv[0], "script")) {
                /* Aliased to 'perf script' */
                return cmd_script(argc, argv, prefix);
        } else if (!strcmp(argv[0], "info")) {
                if (argc) {
                        argc = parse_options(argc, argv,
                                             info_options, info_usage, 0);
                        if (argc)
                                usage_with_options(info_usage, info_options);
                }
                /* recycling report_lock_ops */
                trace_handler = &report_lock_ops;
                setup_pager();
                read_events();
                dump_info();
        } else {
                usage_with_options(lock_usage, lock_options);
        }

        return 0;
}