linux/kernel/trace/trace_clock.c
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  -   local: CPU-local trace clock
 *  -  medium: scalable global clock with some jitter
 *  -  global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture-implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);
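
/*
 * Example (not part of this file): a minimal sketch of timing a
 * CPU-local operation with trace_clock_local(). example_time_local()
 * and the fn callback are hypothetical. Since this clock is not
 * coherent across CPUs, preemption is disabled so that both reads
 * happen on the same CPU and the delta is meaningful.
 */
static u64 example_time_local(void (*fn)(void))
{
	u64 t0, t1;

	preempt_disable_notrace();
	t0 = trace_clock_local();
	fn();				/* the work being timed, same CPU */
	t1 = trace_clock_local();
	preempt_enable_notrace();

	return t1 - t0;			/* nanoseconds, per sched_clock() */
}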

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on local_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
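
/*
 * Example (not part of this file): a hedged sketch of stamping trace
 * records with trace_clock(). struct example_event and
 * example_record() are hypothetical. Because inter-CPU jitter is
 * bounded by about one jiffy, timestamps taken on different CPUs
 * that differ by less than that may not reflect true event order.
 */
struct example_event {
	u64	ts;	/* trace_clock() timestamp, in nanoseconds */
	int	cpu;	/* CPU the event was recorded on */
};

static void example_record(struct example_event *ev)
{
	ev->ts  = trace_clock();
	ev->cpu = raw_smp_processor_id();
}
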
/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD-derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now;

	local_irq_save(flags);

	this_cpu = raw_smp_processor_id();
	now = sched_clock_cpu(this_cpu);
	/*
	 * If in an NMI context then don't risk lockups and return the
	 * cpu_clock() time:
	 */
	if (unlikely(in_nmi()))
		goto out;

	arch_spin_lock(&trace_clock_struct.lock);

	/*
	 * TODO: if this happens often then maybe we should reset
	 * my_scd->clock to prev_time+1, to make sure
	 * we start ticking with the local clock from now on?
	 */
	if ((s64)(now - trace_clock_struct.prev_time) < 0)
		now = trace_clock_struct.prev_time + 1;

	trace_clock_struct.prev_time = now;

	arch_spin_unlock(&trace_clock_struct.lock);

 out:
	local_irq_restore(flags);

	return now;
}
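
/*
 * Example (not part of this file): a sketch illustrating the global
 * monotonicity guarantee; example_check_monotonic() is hypothetical.
 * Outside NMI context, two successive reads never move backwards,
 * even across CPUs, because prev_time is only ever pushed forward
 * under trace_clock_struct.lock.
 */
static void example_check_monotonic(void)
{
	u64 a = trace_clock_global();
	u64 b = trace_clock_global();

	WARN_ON((s64)(b - a) < 0);	/* not expected to fire */
}
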
static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use this counter for cases where you do not care about timings,
 * but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_add_return(1, &trace_counter);
}
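
/*
 * Example (not part of this file): a hedged sketch of ordering events
 * with trace_clock_counter(); struct example_seq_event and
 * example_seq_stamp() are hypothetical. atomic64_add_return() makes
 * each returned value unique and strictly increasing across all CPUs,
 * so the seq field alone determines global event order.
 */
struct example_seq_event {
	u64 seq;	/* strictly ordered, no relation to wall time */
};

static void example_seq_stamp(struct example_seq_event *ev)
{
	ev->seq = trace_clock_counter();
}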