linux/kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

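/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by a ULONG_MAX sentinel; stack_dump_index[]
 * holds each entry's depth from the top of the stack, in bytes.
 */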
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
        { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES,
        .entries                = stack_dump_trace,
};

static unsigned long max_stack_size;
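/*
 * A raw arch_spinlock_t is used so taking the lock is itself not
 * traced (no lockdep or spinlock tracepoints), which would recurse
 * back into this code.
 */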
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

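        /*
         * The address of a local variable, masked with THREAD_SIZE - 1,
         * is its offset within the thread stack; THREAD_SIZE minus that
         * offset is the amount of stack currently in use.
         */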
        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(&this_size))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

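        /*
         * Take a fresh snapshot of the stack; skip = 3 drops the
         * tracing machinery's own frames from the saved trace.
         */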
        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = &this_size;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. Some entries may, for
         * whatever reason, not appear on the stack, so we have to
         * account for them. If all entries are found, this outer
         * loop runs only once. This code only runs when a new max
         * is hit, so it is far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[i] = this_size;
                p = start;

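                /*
                 * Scan the live stack for this entry's return address;
                 * its distance from the top of the stack becomes the
                 * recorded depth for this function.
                 */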
                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                if (!found)
                        i++;
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        int cpu;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /*
         * No atomic needed; trace_active is only modified from its
         * own CPU. A non-zero count means we recursed into the
         * tracer, so back out.
         */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

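/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that this callback does
 * its own recursion protection (the per-cpu trace_active counter),
 * so the generic recursion guard can be skipped.
 */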
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * If we traced inside arch_spin_lock() or from an NMI while
         * holding it, we would deadlock on the lock, so bump the
         * per-cpu trace_active here as well to keep the stack
         * tracer callback out.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
        .llseek         = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

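        /* Stash the array index in m->private so t_show() can read it. */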
        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

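        /*
         * Bump trace_active so the stack tracer callback backs off
         * on this CPU while we hold max_stack_lock; tracing a
         * function called under the lock would otherwise deadlock.
         */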
        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

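        /* SEQ_START_TOKEN makes t_show() emit the header first. */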
        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries - 1);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

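        /*
         * An entry's size is its depth minus the next entry's
         * depth; the final entry is charged its entire depth.
         */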
        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_regex_release,
};

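/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually flips.
 */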
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

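/*
 * "stacktrace" on the kernel command line enables the tracer at boot;
 * "stacktrace_filter=<funcs>" (seen here as the "_filter=" suffix)
 * saves a filter list that is applied later in stack_trace_init().
 */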
static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

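/*
 * Create the control files. Typical usage from a shell (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size   (reset the max)
 */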
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                        NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);