linux/include/linux/perf_event.h
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <linux/cgroup.h>
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int                             (*is_in_guest)(void);
        int                             (*is_user_mode)(void);
        unsigned long                   (*get_guest_ip)(void);
};
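
/*
 * Illustrative sketch (the example_* callbacks are hypothetical,
 * KVM-style usage): a hypervisor fills these in and registers them via
 * perf_register_guest_info_callbacks() (declared below) so samples
 * taken while a guest is running are attributed correctly.
 *
 *      static struct perf_guest_info_callbacks example_guest_cbs = {
 *              .is_in_guest    = example_is_in_guest,
 *              .is_user_mode   = example_is_user_mode,
 *              .get_guest_ip   = example_get_guest_ip,
 *      };
 *
 *      perf_register_guest_info_callbacks(&example_guest_cbs);
 */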

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <asm/local.h>

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

/*
 * Single taken-branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional. When it is not
 * supported, mispred = predicted = 0.
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred:1,  /* target mispredicted */
                predicted:1,/* target predicted */
                reserved:62;
};

/*
 * Branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 * Note that nr can vary from sample to sample.
 * Branches (to, from) are stored from most recent
 * to least recent, i.e., entries[0] contains the most
 * recent branch.
 */
struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};
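
/*
 * Illustrative sketch: walking the branch stack attached to a sample
 * (bs would be data->br_stack; the consume() helper is hypothetical),
 * most recent branch first:
 *
 *      __u64 i;
 *
 *      for (i = 0; i < bs->nr; i++) {
 *              struct perf_branch_entry *br = &bs->entries[i];
 *
 *              consume(br->from, br->to, br->mispred, br->predicted);
 *      }
 */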

struct perf_regs_user {
        __u64           abi;
        struct pt_regs  *regs;
};

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
        u64             config; /* register value */
        unsigned int    reg;    /* register address or index */
        int             alloc;  /* extra register already allocated */
        int             idx;    /* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             event_base_rdpmc;
                        int             idx;
                        int             last_cpu;

                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
                        struct task_struct      *tp_target;
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
                         * Crufty hack to avoid the chicken-and-egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts_seq;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04
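
/*
 * Illustrative sketch (function names hypothetical, driver-internal
 * details elided): a PMU's ->stop() method typically honours
 * PERF_EF_UPDATE by folding in the final count, then marks the event
 * both stopped and up to date:
 *
 *      static void example_pmu_stop(struct perf_event *event, int flags)
 *      {
 *              if (flags & PERF_EF_UPDATE)
 *                      example_pmu_read(event);
 *              event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 *      }
 */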

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        struct device                   *dev;
        const struct attribute_group    **attr_groups;
        char                            *name;
        int                             type;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;

        /*
         * Fully disable/enable this PMU; can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try to initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/removes a counter to/from the PMU; can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add the
         * group's events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)               (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)              (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction; assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */

        /*
         * Will return the value for perf_event_mmap_page::index for this
         * event; if no implementation is provided it will default to
         * event->hw.idx + 1.
         */
        int (*event_idx)                (struct perf_event *event); /* optional */

        /*
         * flush branch stack on context-switches (needed in cpu-wide mode)
         */
        void (*flush_branch_stack)      (void);
};
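
/*
 * Pseudocode sketch (not kernel code): how the core schedules an event
 * group as a single transaction using the methods above:
 *
 *      pmu->start_txn(pmu);
 *
 *      for each event in the group
 *              if (pmu->add(event, PERF_EF_START))
 *                      goto fail;
 *
 *      if (!pmu->commit_txn(pmu))
 *              return success;
 * fail:
 *      pmu->cancel_txn(pmu);   (the core also ->del()s what was added)
 */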

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE             = 0x1,
};

#define SWEVENT_HLIST_BITS              8
#define SWEVENT_HLIST_SIZE              (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head               heads[SWEVENT_HLIST_SIZE];
        struct rcu_head                 rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

#ifdef CONFIG_CGROUP_PERF
/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
        u64                             time;
        u64                             timestamp;
};

struct perf_cgroup {
        struct cgroup_subsys_state      css;
        struct perf_cgroup_info         *info;  /* timing info, one per cpu */
};
#endif

struct ring_buffer;

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * shadow_ctx_time shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        atomic_long_t                   refcount;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct ring_buffer              *rb;
        struct list_head                rb_entry;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;
        void                            *overflow_handler_context;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#ifdef CONFIG_FUNCTION_TRACER
        struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
        struct perf_cgroup              *cgrp; /* cgroup this event is attached to */
        int                             cgrp_defer_enabled;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for both per-task and per-CPU events:
 */
struct perf_event_context {
        struct pmu                      *pmu;
        enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             nr_freq;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups;      /* cgroup evts */
        int                             nr_branch_stack; /* branch_stack evt */
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct ring_buffer              *rb;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu, char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
                                       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
                                        struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback,
                                void *context);
extern void perf_pmu_migrate_context(struct pmu *pmu,
                                int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);
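
/*
 * Illustrative sketch (configuration values hypothetical, error
 * handling shortened): counting CPU cycles on CPU 0 from kernel code.
 *
 *      struct perf_event_attr attr = {
 *              .type           = PERF_TYPE_HARDWARE,
 *              .config         = PERF_COUNT_HW_CPU_CYCLES,
 *              .size           = sizeof(attr),
 *      };
 *      struct perf_event *event;
 *      u64 count, enabled, running;
 *
 *      event = perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
 *      if (IS_ERR(event))
 *              return PTR_ERR(event);
 *
 *      count = perf_event_read_value(event, &enabled, &running);
 *      perf_event_release_kernel(event);
 */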

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
        struct perf_branch_stack        *br_stack;
        struct perf_regs_user           regs_user;
        u64                             stack_user_size;
};

static inline void perf_sample_data_init(struct perf_sample_data *data,
                                         u64 addr, u64 period)
{
        /* remaining struct members initialized in perf_prepare_sample() */
        data->addr = addr;
        data->raw  = NULL;
        data->br_stack = NULL;
        data->period = period;
        data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
        data->regs_user.regs = NULL;
        data->stack_user_size = 0;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);
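
/*
 * Illustrative sketch (regs come from the PMI; driver details elided):
 * the usual overflow sequence in a PMU interrupt handler.
 *
 *      struct perf_sample_data data;
 *
 *      perf_sample_data_init(&data, 0, event->hw.last_period);
 *      if (perf_event_overflow(event, &data, regs))
 *              example_pmu_stop(event, 0);
 *
 * example_pmu_stop() stands in for the driver's ->stop() method (cf.
 * the sketch above); a non-zero return from perf_event_overflow()
 * asks the driver to stop the counter.
 */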

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        if (static_key_false(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
                __perf_sw_event(event_id, nr, regs, addr);
        }
}
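
/*
 * Typical call site, as in the architecture page-fault handlers; the
 * static key keeps this a single patched-out branch when no software
 * events are active:
 *
 *      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */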

extern struct static_key_deferred perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
{
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);

static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

extern int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx,
                          struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
# define perf_misc_flags(regs) \
                (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
# define perf_instruction_pointer(regs) instruction_pointer(regs)
#endif

static inline bool has_branch_stack(struct perf_event *event)
{
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
}

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
                                     unsigned int len);
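
/*
 * Illustrative output sequence (the rec record is hypothetical, error
 * handling elided): reserve space in the ring buffer, copy a record,
 * close the handle. perf_output_put() is defined near the end of this
 * header.
 *
 *      struct perf_output_handle handle;
 *
 *      if (perf_output_begin(&handle, event, sizeof(rec)))
 *              return;
 *      perf_output_put(&handle, rec);
 *      perf_output_end(&handle);
 */
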
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
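
/*
 * Illustrative recursion guard (the work in the middle is elided), as
 * the tracepoint glue uses it to avoid recursing in the same context:
 *
 *      rctx = perf_swevent_get_recursion_context();
 *      if (rctx < 0)
 *              return;
 *      ... emit the event ...
 *      perf_swevent_put_recursion_context(rctx);
 */
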
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern int __perf_event_disable(void *info);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *prev,
                          struct task_struct *next)                     { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }
static inline int perf_event_refresh(struct perf_event *event, int refresh)
{
        return -EINVAL;
}

static inline void
perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)     { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks)                           { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline int __perf_event_disable(void *info)                      { return -1; }
static inline void perf_event_task_tick(void)                           { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
#else
static inline void perf_restore_debug_store(void)                       { }
#endif

#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                           \
do {                                                                    \
        static struct notifier_block fn##_nb __cpuinitdata =            \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
        unsigned long cpu = smp_processor_id();                         \
        unsigned long flags;                                            \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
                (void *)(unsigned long)cpu);                            \
        local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
                (void *)(unsigned long)cpu);                            \
        local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
                (void *)(unsigned long)cpu);                            \
        register_cpu_notifier(&fn##_nb);                                \
} while (0)
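
/*
 * Illustrative use (callback name hypothetical): replay the
 * UP_PREPARE/STARTING/ONLINE steps for the current CPU, then register
 * the notifier for future hotplug events.
 *
 *      static int __cpuinit
 *      example_cpu_notify(struct notifier_block *nb,
 *                         unsigned long action, void *hcpu)
 *      {
 *              ...
 *              return NOTIFY_OK;
 *      }
 *
 *      perf_cpu_notifier(example_cpu_notify);
 */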

struct perf_pmu_events_attr {
        struct device_attribute attr;
        u64 id;
};

#define PMU_EVENT_ATTR(_name, _var, _id, _show)                         \
static struct perf_pmu_events_attr _var = {                             \
        .attr = __ATTR(_name, 0444, _show, NULL),                       \
        .id   =  _id,                                                   \
};
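
/*
 * Example (event value and show routine hypothetical):
 *
 *      PMU_EVENT_ATTR(cycles, event_attr_cycles, 0x3c, example_events_show);
 *
 * creates a read-only sysfs attribute named "cycles"; the show method
 * can recover the 0x3c id via container_of() on the attribute.
 */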

#define PMU_FORMAT_ATTR(_name, _format)                                 \
static ssize_t                                                          \
_name##_show(struct device *dev,                                        \
                               struct device_attribute *attr,           \
                               char *page)                              \
{                                                                       \
        BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);                     \
        return sprintf(page, _format "\n");                             \
}                                                                       \
                                                                        \
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
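
/*
 * Example (bit layout hypothetical): a PMU exposing its config
 * encoding in sysfs:
 *
 *      PMU_FORMAT_ATTR(event, "config:0-7");
 *
 * The resulting read-only "event" attribute prints "config:0-7",
 * which tools use to encode attr.config from event strings.
 */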

#endif /* _LINUX_PERF_EVENT_H */