/*  linux/include/linux/clocksource.h
 *
 *  This file contains the structure definitions for clocksources.
 *
 *  If you are not a clocksource, or timekeeping code, you should
 *  not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
#include <asm/clocksource.h>
#endif

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};
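/*
 * Example (illustrative sketch only, not part of this header): a
 * cyclecounter backed by a hypothetical free running 32-bit MMIO
 * register. "example_counter_base" and the 24000 kHz rate are
 * assumptions made purely for the sake of the illustration.
 */
#if 0	/* illustration only */
static void __iomem *example_counter_base;	/* assumed ioremap()ed base */

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	return (cycle_t)readl(example_counter_base);
}

static struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,
};

/* at init time, for the assumed 24000 kHz counter: */
/*	example_cc.mult = clocksource_khz2mult(24000, example_cc.shift); */
#endif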

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};
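/*
 * Sanity check on the ~585 year figure quoted above: a 64-bit nanosecond
 * counter wraps after 2^64 ns ~= 1.8 * 10^10 seconds, and with one year
 * ~= 3.156 * 10^7 seconds that is roughly 584.5 years.
 */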

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}
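/*
 * Illustrative use (assumptions as in the example_cc sketch above): convert
 * the delta between two raw readings of a wrapping counter. Masking the
 * two's complement difference with cc->mask keeps the result correct even
 * when the hardware counter has wrapped between the two reads.
 */
#if 0	/* illustration only */
static u64 example_delta_ns(const struct cyclecounter *cc,
			    cycle_t last, cycle_t now)
{
	return cyclecounter_cyc2ns(cc, (now - last) & cc->mask);
}
#endif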

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:		Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == cs->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);
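/*
 * Illustrative usage of the timecounter API (sketch only; "example_cc" is
 * the hypothetical cycle counter from the earlier example and "saved_stamp"
 * stands in for a cycle value captured earlier, e.g. by hardware):
 */
#if 0	/* illustration only */
static struct timecounter example_tc;

static void example_timecounter_usage(cycle_t saved_stamp)
{
	u64 now_ns, stamp_ns;

	/* start counting from an arbitrary epoch, here 0 ns */
	timecounter_init(&example_tc, &example_cc, 0);

	/* must be called more often than the hardware counter wraps */
	now_ns = timecounter_read(&example_tc);

	/* convert an older raw cycle value into the same nanosecond base */
	stamp_ns = timecounter_cyc2time(&example_tc, saved_stamp);
}
#endif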

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect.
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @maxadj:		maximum adjustment value to mult (~11%)
 * @flags:		flags describing special properties
 * @archdata:		arch-specific data
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 * @cycle_last:		most recent cycle counter value seen by ::read()
 */
struct clocksource {
	/*
	 * Hotpath data, fits in a single cache line when the
	 * clocksource itself is cacheline aligned.
	 */
	cycle_t (*read)(struct clocksource *cs);
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;
	u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
	struct arch_clocksource_data archdata;
#endif

	const char *name;
	struct list_head list;
	int rating;
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	unsigned long flags;
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);

	/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t cs_last;
	cycle_t wd_last;
#endif
} ____cacheline_aligned;
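/*
 * Example (illustrative sketch only): a minimal clocksource definition for a
 * hypothetical free running 32-bit counter. The read callback, name and
 * rating are assumptions; mult/shift are normally filled in by the core when
 * the clocksource is registered with clocksource_register_hz()/khz()
 * (see the registration sketch further below).
 */
#if 0	/* illustration only */
static cycle_t example_cs_read(struct clocksource *cs)
{
	return (cycle_t)readl(example_counter_base);
}

static struct clocksource example_cs = {
	.name	= "example-counter",
	.rating	= 200,				/* "Good": correct and usable */
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif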

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
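/*
 * For reference (follows directly from the macro above):
 * CLOCKSOURCE_MASK(32) == 0xffffffff and CLOCKSOURCE_MASK(64) == ~0ULL.
 * Masking a two's complement difference, (now - last) & mask, yields the
 * correct cycle delta for counters narrower than 64 bits even across a wrap.
 */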

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
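/*
 * Worked example (values chosen for illustration): for a 1000 kHz (1 MHz)
 * counter and shift = 20, mult = (1000000 << 20) / 1000 = 1048576000, so one
 * cycle converts to (1 * 1048576000) >> 20 = 1000 ns, as expected for 1 MHz.
 */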

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	cycles
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}
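/*
 * Worked example tying the two helpers together (illustrative values): for a
 * 100 MHz clock and shift = 22, clocksource_hz2mult(100000000, 22) returns
 * (1000000000 << 22) / 100000000 = 41943040, and
 * clocksource_cyc2ns(300, 41943040, 22) = (300 * 41943040) >> 22 = 3000 ns,
 * i.e. 300 cycles at 100 MHz are indeed 3 microseconds.
 */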


extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
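/*
 * Illustrative call (sketch only; the 19.2 MHz frequency and 10 second
 * range are assumptions): compute a cycle-to-nanosecond mult/shift pair,
 * i.e. convert "from" counts per second into "to" (here NSEC_PER_SEC)
 * units per second without overflowing for at least "minsec" seconds.
 */
#if 0	/* illustration only */
	u32 mult, shift;

	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 10);
#endif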

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}
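/*
 * Illustrative registration (sketch only, reusing the hypothetical example_cs
 * and a made-up 24 MHz rate): the core derives mult, shift, maxadj and
 * max_idle_ns from the frequency passed here.
 */
#if 0	/* illustration only */
	if (clocksource_register_hz(&example_cs, 24000000))
		pr_err("example-counter: clocksource registration failed\n");
#endif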

static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}


extern void timekeeping_notify(struct clocksource *clock);

extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
extern cycle_t clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));
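/*
 * Illustrative use of the MMIO helper (sketch only; base address, name, rate,
 * rating and counter width are assumptions): this allocates and registers a
 * simple clocksource whose read callback is one of the accessors above, here
 * an up-counting 32-bit register.
 */
#if 0	/* illustration only */
	clocksource_mmio_init(example_counter_base, "example-counter",
			      24000000, 200, 32, clocksource_mmio_readl_up);
#endif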

extern int clocksource_i8253_init(void);

#ifdef CONFIG_CLKSRC_OF
extern void clocksource_of_init(void);

#define CLOCKSOURCE_OF_DECLARE(name, compat, fn)			\
	static const struct of_device_id __clksrc_of_table_##name	\
		__used __section(__clksrc_of_table)			\
		 = { .compatible = compat, .data = fn };
#else
#define CLOCKSOURCE_OF_DECLARE(name, compat, fn)
#endif
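/*
 * Illustrative use of the device tree hook (sketch only; the table entry
 * name, compatible string and init function are made up). A driver places
 * this at file scope so that clocksource_of_init() can match the node and
 * call the driver's init function; the exact prototype expected of that
 * function depends on the kernel version.
 */
#if 0	/* illustration only */
CLOCKSOURCE_OF_DECLARE(example_timer, "vendor,example-timer",
		       example_timer_of_init);
#endif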

#endif /* _LINUX_CLOCKSOURCE_H */