linux/include/trace/ftrace.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry		ent;
 *	<type>				<item>;
 *	<type2>				<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
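
/*
 * For example, a hypothetical tracepoint declared with
 *
 *	TP_STRUCT__entry(
 *		__field(int, bar)
 *		__array(char, comm, 16)
 *		__string(name, name)
 *	)
 *
 * would, at this stage, expand to something like:
 *
 *	struct ftrace_raw_foo {
 *		struct trace_entry	ent;
 *		int			bar;
 *		char			comm[16];
 *		u32			__data_loc_name;
 *		char			__data[0];
 *	};
 *
 * (The event "foo" and its fields are illustrative only.)
 */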

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, it is useful when all events have
 * the same parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
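
/*
 * For instance (hypothetical events), two tracepoints that share the
 * same prototype could be declared as:
 *
 *	DECLARE_EVENT_CLASS(foo_class,
 *		TP_PROTO(int bar),
 *		TP_ARGS(bar),
 *		TP_STRUCT__entry(__field(int, bar)),
 *		TP_fast_assign(__entry->bar = bar;),
 *		TP_printk("bar=%d", __entry->bar));
 *
 *	DEFINE_EVENT(foo_class, foo_begin, TP_PROTO(int bar), TP_ARGS(bar));
 *	DEFINE_EVENT(foo_class, foo_end, TP_PROTO(int bar), TP_ARGS(bar));
 *
 * Both events then reuse the single foo_class template, which is what
 * TRACE_EVENT(name, ...) boils down to with template == name.
 */
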
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	DECLARE_EVENT_CLASS(name,			       \
			     PARAMS(proto),		       \
			     PARAMS(args),		       \
			     PARAMS(tstruct),		       \
			     PARAMS(assign),		       \
			     PARAMS(print));		       \
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call	__used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,			\
		assign, print, reg, unreg)				\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),			\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))		\

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)					\
	__TRACE_EVENT_FLAGS(name, value)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32				<item1>;
 *	u32				<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * used to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
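
/*
 * A worked example (illustrative numbers only): if an event's dynamic
 * array data begins at byte offset 16 from the start of the record and
 * is 6 bytes long, its u32 <item> is stored as:
 *
 *	<item> = 16 | (6 << 16) = 0x00060010
 *
 * i.e. the low 16 bits hold the offset and the high 16 bits hold the
 * length in bytes.
 */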

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags,
 *			    struct trace_event *trace_event)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
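
/*
 * Decoding example (illustrative value only): if __data_loc_name holds
 * 0x00060010, __get_dynamic_array(name) yields a pointer 16 bytes
 * (0x0010) past the start of the entry, and the upper 16 bits (6)
 * give the field's length in bytes.
 */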

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)			\
			__print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
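
/*
 * These are used inside TP_printk(); e.g. a hypothetical event might
 * print a state field as:
 *
 *	TP_printk("state=%s", __print_symbolic(__entry->state,
 *			{ 0, "RUNNING" }, { 1, "SLEEPING" }))
 *
 * printing the matching name, or the raw value in hex when nothing
 * matches.
 */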

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	do {								\
		mutex_lock(&event_storage_mutex);			\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		snprintf(event_storage, sizeof(event_storage),		\
			 "%s[%d]", #type, len);				\
		ret = trace_define_field(event_call, event_storage, #item, \
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		mutex_unlock(&event_storage_mutex);			\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				       \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item),   \
				 sizeof(field.__data_loc_##item),	       \
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */
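
/*
 * A sketch of what this computes (hypothetical string field "name"
 * whose source is 5 characters): __string() becomes
 * __dynamic_array(char, name, strlen(src) + 1), so
 *
 *	__data_offsets->name = __data_size + offsetof(typeof(*entry), __data);
 *	__data_offsets->name |= (6 * sizeof(char)) << 16;
 *	__data_size += 6;
 *
 * packing both the offset and the 6-byte length (5 chars + NUL) into
 * the one u32 that stage 2 reserved room for.
 */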

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry	= ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_<call>.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_<call>,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.name			= "<call>",
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry	= ring_buffer_event_data(event);			\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}
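
/*
 * check_trace_callback_type_<call>() is generated by DECLARE_TRACE()
 * in <linux/tracepoint.h>; roughly (a sketch, not the verbatim
 * definition):
 *
 *	static inline void
 *	check_trace_callback_type_<call>(void (*cb)(void *__data, proto))
 *	{
 *	}
 *
 * Passing ftrace_raw_event_<template> to it makes the compiler
 * type-check the probe against the tracepoint's prototype.
 */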

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)
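
/*
 * With __entry mapped to REC, a hypothetical
 *
 *	TP_printk("bar=%d", __entry->bar)
 *
 * now stringifies to the print_fmt
 *
 *	"\"bar=%d\", REC->bar"
 *
 * which is what appears in the event's format file for userspace
 * parsers.
 */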

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback for perf events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert into the ring buffer but into a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non-NMI buffer
 *	// This also protects the RCU read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- assign our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size);  <- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TP_perf_assign
#define TP_perf_assign(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)				\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}


#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT
