linux/kernel/events/callchain.c
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

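/*
 * Per-CPU storage for callchain scratch buffers. cpu_entries[] is a
 * flexible array indexed by CPU number; each element points to an
 * array of PERF_NR_CONTEXTS entries, one scratch buffer per recursion
 * context. The whole structure is published via RCU so that NMI-time
 * readers can be flushed out before it is freed.
 */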
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

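/*
 * callchain_recursion marks, per CPU and per context level, that a
 * callchain capture is already in flight, so a nested capture (e.g.
 * an NMI hitting during an interrupt-time capture) can back off
 * instead of scribbling over the buffer in use.
 *
 * nr_callchain_events counts events that need callchain buffers; the
 * buffers are allocated on the 0 -> 1 transition and torn down on the
 * final put, serialized by callchain_mutex.
 */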
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

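/*
 * Weak default stubs: architectures that support callchain sampling
 * override these with their real kernel/user stack unwinders. On
 * everything else the stubs simply leave the entry empty.
 */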
__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry *entry,
				struct pt_regs *regs)
{
}

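/*
 * RCU callback: once this runs, every reader that could have seen the
 * old callchain_cpus_entries pointer is done with it, so the per-CPU
 * buffers and their container can be freed outright.
 */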
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

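/*
 * Unpublish the buffers and defer the actual freeing until after a
 * grace period; NMI-context readers may still be walking them. Called
 * with callchain_mutex held.
 */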
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	rcu_assign_pointer(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

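/*
 * Allocate the container plus one PERF_NR_CONTEXTS-sized scratch area
 * per possible CPU, placed NUMA-local with kmalloc_node(). The final
 * rcu_assign_pointer() publishes the structure only once it is fully
 * initialized, so concurrent readers see either NULL or a complete
 * set of buffers, never a half-built one.
 */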
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

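/*
 * Take a reference on the callchain buffers, allocating them on the
 * first 0 -> 1 transition. Callers after the first only need to check
 * that the initial allocation actually succeeded.
 */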
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	/*
	 * On failure, roll back the reference we just took; otherwise
	 * the count drifts out of sync with the buffers' lifetime.
	 */
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

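/*
 * Drop a reference; the final put tears the buffers down. Taking the
 * mutex only when the count hits zero keeps the common path cheap
 * while still serializing against a concurrent get_callchain_buffers().
 */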
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

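/*
 * Claim this CPU's scratch entry for the current recursion context.
 * Returns NULL with *rctx == -1 if a capture is already running at
 * this context level, or NULL with a valid *rctx if the buffers are
 * gone; the latter case still needs a matching put_callchain_entry().
 */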
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return &entries->cpu_entries[cpu][*rctx];
}

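/* Release the recursion slot claimed by get_callchain_entry(). */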
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}

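/*
 * Capture a kernel and/or user callchain into this CPU's scratch
 * entry. When the sample interrupted the kernel, the kernel part is
 * unwound from @regs and the user part from the task's saved entry
 * regs; kernel threads (no ->mm) have no user stack to unwind.
 */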
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		/*
		 * Continue with the user part from the register state
		 * saved at kernel entry, if there is a user context.
		 */
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		/*
		 * Disallow cross-task user callchains.
		 */
		if (event->ctx->task && event->ctx->task != current)
			goto exit_put;

		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}