linux/kernel/latencytop.c
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks two levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
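
/*
 * For reference, a sketch of the record layout (the authoritative
 * definition lives in <linux/latencytop.h>; the fields below are from
 * memory and should be checked against that header):
 *
 *      struct latency_record {
 *              unsigned long   backtrace[LT_BACKTRACEDEPTH];
 *              unsigned int    count;  (times this cause was hit)
 *              unsigned long   time;   (accumulated latency, microseconds)
 *              unsigned long   max;    (worst latency seen, microseconds)
 *      };
 *
 * LT_BACKTRACEDEPTH bounds the stored backtrace, and LT_SAVECOUNT (32)
 * bounds the number of per-task records.
 */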

#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

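/*
 * Note: this is called from copy_process() at fork time (at least in this
 * kernel generation), so a new task starts with an empty set of records.
 */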
void clear_all_latency_tracing(struct task_struct *p)
{
        unsigned long flags;

        if (!latencytop_enabled)
                return;

        spin_lock_irqsave(&latency_lock, flags);
        memset(&p->latency_record, 0, sizeof(p->latency_record));
        p->latency_record_count = 0;
        spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
        unsigned long flags;

        spin_lock_irqsave(&latency_lock, flags);
        memset(&latency_record, 0, sizeof(latency_record));
        spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
                                 struct latency_record *lat)
{
        int firstnonnull = MAXLR + 1;
        int i;

        if (!latencytop_enabled)
                return;

        /* skip kernel threads for now */
        if (!tsk->mm)
                return;

        for (i = 0; i < MAXLR; i++) {
                int q, same = 1;

                /* Nothing stored: */
                if (!latency_record[i].backtrace[0]) {
                        if (firstnonnull > i)
                                firstnonnull = i;
                        continue;
                }
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat->backtrace[q];

                        if (latency_record[i].backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        latency_record[i].count++;
                        latency_record[i].time += lat->time;
                        if (lat->time > latency_record[i].max)
                                latency_record[i].max = lat->time;
                        return;
                }
        }

        i = firstnonnull;
        if (i >= MAXLR - 1)
                return;

        /* Allocated a new one: */
        memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/*
 * Helper to store a backtrace into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
                                    struct latency_record *lat)
{
        struct stack_trace trace;

        memset(&trace, 0, sizeof(trace));
        trace.max_entries = LT_BACKTRACEDEPTH;
        trace.entries = &lat->backtrace[0];
        save_stack_trace_tsk(tsk, &trace);
}
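
/*
 * Note (an assumption based on the arch stacktrace code of this era): the
 * saved trace is terminated either by a 0 entry (the record was zeroed
 * before the trace was stored) or by a ULONG_MAX sentinel appended by
 * save_stack_trace_tsk() when room is left in the buffer, which is why
 * the comparison loops treat both values as end-of-backtrace markers.
 */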

/**
 * __account_scheduler_latency - record a latency that has occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
        unsigned long flags;
        int i, q;
        struct latency_record lat;

        /* Long interruptible waits are generally user requested... */
        if (inter && usecs > 5000)
                return;

        /* Negative sleeps are time going backwards */
        /* Zero-time sleeps are non-interesting */
        if (usecs <= 0)
                return;

        memset(&lat, 0, sizeof(lat));
        lat.count = 1;
        lat.time = usecs;
        lat.max = usecs;
        store_stacktrace(tsk, &lat);

        spin_lock_irqsave(&latency_lock, flags);

        account_global_scheduler_latency(tsk, &lat);

        for (i = 0; i < tsk->latency_record_count; i++) {
                struct latency_record *mylat;
                int same = 1;

                mylat = &tsk->latency_record[i];
                for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                        unsigned long record = lat.backtrace[q];

                        if (mylat->backtrace[q] != record) {
                                same = 0;
                                break;
                        }

                        /* 0 and ULONG_MAX entries mean end of backtrace: */
                        if (record == 0 || record == ULONG_MAX)
                                break;
                }
                if (same) {
                        mylat->count++;
                        mylat->time += lat.time;
                        if (lat.time > mylat->max)
                                mylat->max = lat.time;
                        goto out_unlock;
                }
        }

        /*
         * Short term hack: once LT_SAVECOUNT (32) entries are in use we
         * stop adding new ones; in the future these slots should be
         * recycled:
         */
        if (tsk->latency_record_count >= LT_SAVECOUNT)
                goto out_unlock;

        /* Allocated a new one: */
        i = tsk->latency_record_count++;
        memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
        spin_unlock_irqrestore(&latency_lock, flags);
}
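
/*
 * A sketch of how this entry point is reached (assumed from the scheduler
 * code of this kernel generation; check <linux/latencytop.h> and
 * kernel/sched_fair.c for the authoritative versions). The header provides
 * a wrapper that keeps the fast path cheap when the feature is off:
 *
 *      static inline void
 *      account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 *      {
 *              if (unlikely(latencytop_enabled))
 *                      __account_scheduler_latency(task, usecs, inter);
 *      }
 *
 * and CFS calls it when a task is woken after sleeping or blocking,
 * converting the measured delta from nanoseconds to (approximate)
 * microseconds with a shift:
 *
 *      account_scheduler_latency(tsk, delta >> 10, 1);  (interruptible)
 *      account_scheduler_latency(tsk, delta >> 10, 0);  (uninterruptible)
 */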

static int lstats_show(struct seq_file *m, void *v)
{
        int i;

        seq_puts(m, "Latency Top version : v0.1\n");

        for (i = 0; i < MAXLR; i++) {
                struct latency_record *lr = &latency_record[i];

                if (lr->backtrace[0]) {
                        int q;

                        seq_printf(m, "%i %lu %lu",
                                   lr->count, lr->time, lr->max);
                        for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
                                unsigned long bt = lr->backtrace[q];

                                if (!bt || bt == ULONG_MAX)
                                        break;
                                seq_printf(m, " %ps", (void *)bt);
                        }
                        seq_puts(m, "\n");
                }
        }
        return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
             loff_t *offs)
{
        clear_global_latency_tracing();

        return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
        .open           = lstats_open,
        .read           = seq_read,
        .write          = lstats_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init init_lstats_procfs(void)
{
        proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
}
device_initcall(init_lstats_procfs);
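
/*
 * Typical usage, assuming CONFIG_LATENCYTOP=y (the sysctl toggling
 * latencytop_enabled is wired up in kernel/sysctl.c):
 *
 *      # echo 1 > /proc/sys/kernel/latencytop     (enable tracking)
 *      # cat /proc/latency_stats                  (read accumulated data)
 *      # echo > /proc/latency_stats               (flush the records)
 *
 * The latencytop userspace tool does exactly this on a regular basis.
 */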