linux/kernel/events/internal.h
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01

struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */
        /* poll support */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0]; /* nr_pages pointers */
};
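
/*
 * Editorial sketch, not part of the original header: data_pages[0] is a
 * zero-length trailing array, so a ring_buffer and its per-page pointers
 * live in one variable-sized allocation.  rb_alloc() sizes it roughly as
 * below; rb_alloc_sketch() is a hypothetical name and assumes a
 * kzalloc()-style allocator (<linux/slab.h>) is available.
 */
static inline struct ring_buffer *rb_alloc_sketch(int nr_pages)
{
        size_t size = offsetof(struct ring_buffer, data_pages[nr_pages]);
        struct ring_buffer *rb = kzalloc(size, GFP_KERNEL);

        if (rb)
                rb->nr_pages = nr_pages;  /* pages filled in by the caller */
        return rb;
}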

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
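
/*
 * Worked example (editorial): with PAGE_SHIFT == 12 and order-0 pages, a
 * buffer of nr_pages == 8 spans 8 << 12 == 32768 bytes (32 KiB).  Under
 * CONFIG_PERF_USE_VMALLOC the same request becomes one big allocation with
 * nr_pages == 1 and page_order == 3, and 1 << (12 + 3) is again 32768 bytes.
 */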

/*
 * Each memcpy_func below follows the __copy_from_user() convention: it
 * returns the number of bytes that could NOT be copied, 0 on full success.
 * The generated functions return the number of bytes left unwritten.
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                      \
static inline unsigned int                                              \
func_name(struct perf_output_handle *handle,                            \
          const void *buf, unsigned int len)                            \
{                                                                       \
        unsigned long size, written;                                    \
                                                                        \
        do {                                                            \
                size = min_t(unsigned long, handle->size, len);         \
                                                                        \
                written = memcpy_func(handle->addr, buf, size);         \
                written = size - written; /* bytes actually copied */   \
                                                                        \
                len -= written;                                         \
                handle->addr += written;                                \
                buf += written;                                         \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
                        struct ring_buffer *rb = handle->rb;            \
                                                                        \
                        /* nr_pages is a power of two: mask to wrap */  \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
                        handle->addr = rb->data_pages[handle->page];    \
                        handle->size = PAGE_SIZE << page_order(rb);     \
                }                                                       \
        } while (len && written == size);                               \
                                                                        \
        return len;                                                     \
}
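
/*
 * Editorial sketch of the wrap-around step above: nr_pages is always a
 * power of two, so masking with nr_pages - 1 is a division-free modulo.
 * ring_advance_sketch() is illustrative and not part of the original file.
 */
static inline unsigned int
ring_advance_sketch(unsigned int page, unsigned int nr_pages)
{
        return (page + 1) & (nr_pages - 1); /* e.g. 7 -> 0 for nr_pages == 8 */
}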

static inline int memcpy_common(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return 0;                       /* plain memcpy() cannot fail */
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/* Advance the handle without touching memory: report zero bytes missed. */
#define MEMCPY_SKIP(dst, src, n) (0)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

/* Arch overrides must likewise return the number of bytes not copied. */
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
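
/*
 * Usage note (editorial): perf_output_copy() hands record payloads to
 * __output_copy(), and the perf_output_put(handle, x) helper in
 * <linux/perf_event.h> wraps it for fixed-size fields, along the lines of:
 *
 *      perf_output_put(&handle, header);    copies sizeof(header) bytes
 */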

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

/*
 * Claim the recursion slot for the current execution context (task,
 * softirq, hardirq or NMI).  Returns the slot index, or -1 if an event
 * is already being processed in this context, i.e. we would recurse.
 */
static inline int get_recursion_context(int *recursion)
{
        int rctx;

        if (in_nmi())
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        barrier();
        recursion[rctx]--;
}
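
/*
 * Editorial sketch of how the pair above is used (modelled on
 * perf_swevent_get_recursion_context() in kernel/events/core.c);
 * 'ctx_recursion' stands in for a per-cpu int[4] and the helper
 * name is hypothetical.
 */
static inline void recursion_guard_sketch(int *ctx_recursion)
{
        int rctx = get_recursion_context(ctx_recursion);

        if (rctx < 0)
                return;         /* already inside perf here: drop the event */

        /* ... safely emit the event ... */

        put_recursion_context(ctx_recursion, rctx);
}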

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
        return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
        return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */