linux/kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

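/*
 * Module state: ctx_trace is the trace array that receives the events;
 * sched_ref counts the users of the tracepoint probes; tracer_enabled
 * and sched_stopped gate whether the probes actually record anything.
 * sched_register_mutex serializes probe registration and the counters.
 */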
static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;
static int                      sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int                      sched_stopped;

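/*
 * Reserve a TRACE_CTX entry on @tr's ring buffer, fill it with the
 * prev/next task details and commit it, unless the event filter
 * discards it first.
 */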
void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_context_switch;
        struct ring_buffer *buffer = tr->buffer;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, flags, pc);
}

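/*
 * Tracepoint probe for sched_switch: always record the cmdlines of
 * both tasks, then log the switch itself if a tracer has enabled
 * recording and per-cpu tracing is not disabled.
 */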
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (unlikely(!sched_ref))
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled || sched_stopped)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

        local_irq_restore(flags);
}

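/*
 * Same as tracing_sched_switch_trace(), but writes a TRACE_WAKE entry:
 * "prev" is the task doing the waking and "next" is the wakee.
 */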
void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct ftrace_event_call *call = &event_wakeup;
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        struct ring_buffer *buffer = tr->buffer;

        event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);

        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, flags, pc);
}

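/*
 * Tracepoint probe for sched_wakeup and sched_wakeup_new: record the
 * current task's cmdline and, if recording is enabled, log the wakeup.
 */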
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (unlikely(!sched_ref))
                return;

        tracing_record_cmdline(current);

        if (!tracer_enabled || sched_stopped)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}

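/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints, unwinding on partial failure.
 */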
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch, NULL);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_switch\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
        return ret;
}

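/* Detach the probes, in the reverse order of registration. */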
static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch, NULL);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
        unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}

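/* Register the probes on the 0 -> 1 transition of sched_ref. */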
static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))
                tracing_sched_register();
        mutex_unlock(&sched_register_mutex);
}

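/* Unregister the probes on the 1 -> 0 transition of sched_ref. */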
static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))
                tracing_sched_unregister();
        mutex_unlock(&sched_register_mutex);
}

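/*
 * Cmdline recording only needs the probes to run (for the
 * tracing_record_cmdline() calls); tracer_enabled is not raised, so
 * these calls by themselves do not write events to the ring buffer.
 */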
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
        if (unlikely(!ctx_trace)) {
                WARN_ON(1);
                return;
        }

        tracing_start_sched_switch();

        mutex_lock(&sched_register_mutex);
        tracer_enabled++;
        mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
        mutex_lock(&sched_register_mutex);
        tracer_enabled--;
        WARN_ON(tracer_enabled < 0);
        mutex_unlock(&sched_register_mutex);

        tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
        ctx_trace = tr;
}
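
/*
 * Usage sketch (illustrative only; my_tracer_init/my_tracer_reset are
 * hypothetical and not part of this file): a tracer that wants context
 * switches recorded into its own trace array would typically do:
 *
 *      static int my_tracer_init(struct trace_array *tr)
 *      {
 *              tracing_sched_switch_assign_trace(tr);
 *              tracing_start_sched_switch_record();
 *              return 0;
 *      }
 *
 *      static void my_tracer_reset(struct trace_array *tr)
 *      {
 *              tracing_stop_sched_switch_record();
 *      }
 */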