linux/arch/mips/include/asm/timex.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
 * Copyright (C) 2014 by Maciej W. Rozycki
 */
#ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H

#ifdef __KERNEL__

#include <linux/compiler.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/mipsregs.h>
#include <asm/cpu-type.h>

/*
 * This is the clock rate of the i8253 PIT.  A MIPS system may not have
 * a PIT, but the symbol is used all over the kernel, including in some
 * APIs.  So keeping it defined to the PIT's rate is the only sane thing
 * to do for now.
 */
#define CLOCK_TICK_RATE 1193182

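/*
 * Editor's note, not part of the original header: a typical consumer of
 * this symbol is the LATCH macro in <linux/timex.h>, which (assuming it
 * is still defined as ((CLOCK_TICK_RATE + HZ/2) / HZ)) works out to
 *
 *        (1193182 + 50) / 100 = 11932        for HZ = 100
 *
 * i.e. the PIT reload value for a 100 Hz tick; this is why the constant
 * is kept even on PIT-less MIPS systems.
 */
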
/*
 * Standard way to access the cycle counter.
 * Currently only used on SMP for scheduling.
 *
 * Only the low 32 bits are available as a continuously counting entity.
 * But this only means we'll force a reschedule every 8 seconds or so
 * (at a count rate of 500 MHz, for instance, 2^32 counts take roughly
 * 8.6 seconds to wrap), which isn't an evil thing.
 *
 * We know that all SMP capable CPUs have cycle counters.
 */

typedef unsigned int cycles_t;

/*
 * On R4000/R4400 before version 5.0 an erratum exists such that if the
 * cycle counter is read at the exact moment that it matches the
 * compare register, no interrupt will be generated.
 *
 * There is a suggested workaround, and the erratum also cannot strike
 * if the compare interrupt is not being used as the clock event device.
 * However, for now the implementation of this function does not get
 * these fine details right.
 */
static inline int can_use_mips_counter(unsigned int prid)
{
        int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;

        if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
                return 0;
        else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
                return 1;
        else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
                return 1;
        /* Make sure we don't peek at cpu_data[0].options in the fast path! */
        if (!__builtin_constant_p(cpu_has_counter))
                asm volatile("" : "=m" (cpu_data[0].options));
        if (likely(cpu_has_counter &&
                   prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
                return 1;
        else
                return 0;
}

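/*
 * Editor's note, not part of the original header: assuming the usual
 * PRID layout for legacy CPUs (implementation number in bits 15:8,
 * revision in bits 7:0) and PRID_REV_ENCODE_44(ver, rev) expanding to
 * ((ver) << 4 | (rev)) as in <asm/cpu.h>, the final comparison reads as
 *
 *        prid >= (PRID_IMP_R4000 | 0x50)
 *
 * i.e. only R4000/R4400 parts at revision 5.0 or later (or CPUs with a
 * numerically higher implementation number) are trusted, matching the
 * erratum description above.
 */
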
static inline cycles_t get_cycles(void)
{
        if (can_use_mips_counter(read_c0_prid()))
                return read_c0_count();
        else
                return 0;       /* no usable counter */
}

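/*
 * Editor's sketch, not part of the original header: a minimal example of
 * how a caller might use get_cycles() to cost a short code section.
 * do_something() is a hypothetical workload; remember that cycles_t is
 * only 32 bits wide here and reads as 0 on counter-less CPUs.
 *
 *        cycles_t start, delta;
 *
 *        start = get_cycles();
 *        do_something();
 *        delta = get_cycles() - start;
 */
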
/*
 * Like get_cycles(), but where c0_count is not available we desperately
 * use c0_random in an attempt to get at least a little bit of entropy.
 *
 * The R6000 and R6000A have neither a count register nor a random
 * register, which leaves no entropy source in the CPU itself.
 */
static inline unsigned long random_get_entropy(void)
{
        unsigned int prid = read_c0_prid();
        unsigned int imp = prid & PRID_IMP_MASK;

        if (can_use_mips_counter(prid))
                return read_c0_count();
        else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
                return read_c0_random();
        else
                return 0;       /* no usable register */
}
#define random_get_entropy random_get_entropy

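/*
 * Editor's note, not part of the original header: the self-referential
 * #define above is the usual trick for telling <linux/timex.h> that the
 * architecture supplies its own random_get_entropy(); otherwise the
 * generic header falls back to something along the lines of
 *
 *        #ifndef random_get_entropy
 *        #define random_get_entropy()        get_cycles()
 *        #endif
 *
 * (the exact fallback depends on the kernel version).  The returned
 * value is mixed into the interrupt entropy pool by the random core.
 */
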
#endif /* __KERNEL__ */

#endif /* _ASM_TIMEX_H */