linux/arch/x86/vdso/vclock_gettime.c
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

#define gtod (&VVAR(vsyscall_gtod_data))

notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

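/*
 * 0xf0 is the offset of the HPET main counter register within the HPET
 * MMIO block; the vdso reads it through the user-visible VSYSCALL_HPET
 * fixmap mapping.
 */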
static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}

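/*
 * Fallback paths: when the current clocksource cannot be read from
 * userspace (vclock_mode is VCLOCK_NONE), issue the real syscall so the
 * caller still gets a correct result.
 */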
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}

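/*
 * Nanoseconds elapsed since the timekeeping core last updated gtod:
 * read the hardware counter for the active vclock mode, subtract
 * cycle_last, mask off non-counting bits and scale the delta with the
 * clocksource's mult/shift pair.
 */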
notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
	else
		return 0;
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}

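/*
 * Snapshot the wall clock under the gtod seqcount: if the kernel
 * updates the timekeeping data while we read it, retry, then add the
 * nanoseconds that have accumulated since that update.
 */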
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}

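/* Same as do_realtime(), but for CLOCK_MONOTONIC. */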
notrace static int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns;
	int mode;

	do {
		seq = read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ts->tv_nsec = gtod->monotonic_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}

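/*
 * The _COARSE clocks return the timestamp cached at the last timer
 * tick, so no hardware counter is read; the result is cheaper to obtain
 * but only tick-granular.
 */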
notrace static int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	return 0;
}

notrace static int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	return 0;
}

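/*
 * clock_gettime() entry point: dispatch on the clock id and fall back
 * to the real syscall for clock ids the vdso does not handle or when no
 * vclock is available.
 */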
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	int ret = VCLOCK_NONE;

	switch (clock) {
	case CLOCK_REALTIME:
		ret = do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		ret = do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gettime(clock, ts);
	return 0;
}
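/*
 * clock_gettime is exported as a weak alias so that the plain symbol
 * name resolves to the same fast path as __vdso_clock_gettime.
 */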
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

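/*
 * gettimeofday() reuses do_realtime() by treating the timeval as a
 * timespec (the BUILD_BUG_ON below checks that the two layouts match)
 * and then converting the nanoseconds field to microseconds.
 */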
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret = VCLOCK_NONE;

	if (likely(tv != NULL)) {
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		ret = do_realtime((struct timespec *)tv);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	if (ret == VCLOCK_NONE)
		return vdso_fallback_gtod(tv, tz);
	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));