linux/kernel/time/timekeeping.c
/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>


static struct timekeeper timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
        while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
                tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
                tk->xtime_sec++;
        }
}
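
/*
 * Example of the shifted-nanosecond representation, with illustrative
 * numbers: for a clocksource shift of 8, a wall time of 1.5s is stored
 * as xtime_sec = 1 and xtime_nsec = 500000000 << 8. If accumulation
 * then adds 0.7s, xtime_nsec reaches 1200000000 << 8, and one pass of
 * the loop above subtracts NSEC_PER_SEC << 8 and bumps xtime_sec,
 * normalizing the pair to 2s + (200000000 << 8).
 */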

static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec = ts->tv_sec;
        tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
        tk->xtime_sec += ts->tv_sec;
        tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
        tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
        struct timespec tmp;

        /*
         * Verify consistency of: offset_real = -wall_to_monotonic
         * before modifying anything
         */
        set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
                                        -tk->wall_to_monotonic.tv_nsec);
        WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
        tk->wall_to_monotonic = wtm;
        set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
        tk->offs_real = timespec_to_ktime(tmp);
}

static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
        /* Verify consistency before modifying */
        WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

        tk->total_sleep_time    = t;
        tk->offs_boot           = timespec_to_ktime(t);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:                 Pointer to the timekeeper to set up.
 * @clock:              Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp, ntpinterval;
        struct clocksource *old_clock;

        old_clock = tk->clock;
        tk->clock = clock;
        clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        ntpinterval = tmp;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        tk->cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        tk->xtime_interval = (u64) interval * clock->mult;
        tk->xtime_remainder = ntpinterval - tk->xtime_interval;
        tk->raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        /* if changing clocks, convert xtime_nsec shift units */
        if (old_clock) {
                int shift_change = clock->shift - old_clock->shift;
                if (shift_change < 0)
                        tk->xtime_nsec >>= -shift_change;
                else
                        tk->xtime_nsec <<= shift_change;
        }
        tk->shift = clock->shift;

        tk->ntp_error = 0;
        tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult values for the currently
         * active clocksource. These values will be adjusted via NTP
         * to counteract clock drifting.
         */
        tk->mult = clock->mult;
}
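
/*
 * Worked example with illustrative numbers: assume NTP_INTERVAL_LENGTH
 * is 10000000 ns (HZ=100) and the clocksource ticks at 1 GHz with
 * shift = 24 and mult = 1 << 24, so one cycle converts to exactly one
 * nanosecond. Then tmp = 10000000 << 24, and the rounded division by
 * mult yields cycle_interval = 10000000 cycles per tick.
 * xtime_interval = cycle_interval * mult matches ntpinterval exactly,
 * so xtime_remainder is 0, and raw_interval is 10000000 ns. With a
 * less convenient mult, the rounding leaves a small xtime_remainder,
 * which is charged against ntp_error during each accumulation.
 */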

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
        if (likely(arch_gettimeoffset))
                return arch_gettimeoffset();
        return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        nsec = cycle_delta * tk->mult + tk->xtime_nsec;
        nsec >>= tk->shift;

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}
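
/*
 * The masked subtraction above makes counter wraparound harmless for
 * deltas shorter than one full wrap. Illustrative example for a 32-bit
 * counter (mask = 0xffffffff): if cycle_last = 0xfffffff0 and the
 * counter has wrapped around to cycle_now = 0x00000010, then
 * (cycle_now - cycle_last) & mask = 0x20, the true 32-cycle delta.
 */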

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        /* read clocksource: */
        clock = tk->clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* convert delta to nanoseconds. */
        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

        /* If arch requires, add in get_arch_timeoffset() */
        return nsec + get_arch_timeoffset();
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk)
{
        raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 *
 * Must hold write on timekeeper.lock
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        int ret;

        write_seqlock_irqsave(&tk->lock, flags);
        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
        /* update timekeeping data */
        update_pvclock_gtod(tk);
        write_sequnlock_irqrestore(&tk->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 *
 * Must hold write on timekeeper.lock
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        int ret;

        write_seqlock_irqsave(&tk->lock, flags);
        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
        write_sequnlock_irqrestore(&tk->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
        if (clearntp) {
                tk->ntp_error = 0;
                ntp_clear();
        }
        update_vsyscall(tk);
        update_pvclock_gtod(tk);
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = tk->clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        tk->xtime_nsec += cycle_delta * tk->mult;

        /* If arch requires, add in get_arch_timeoffset() */
        tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

        tk_normalize_xtime(tk);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday - Returns the time of day in a timespec.
 * @ts:         pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs = 0;

        do {
                seq = read_seqbegin(&tk->lock);

                ts->tv_sec = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsecs);

        /*
         * Do not bail out early, in case there were callers still using
         * the value, even in the face of the WARN_ON.
         */
        if (unlikely(timekeeping_suspended))
                return -EAGAIN;
        return 0;
}
EXPORT_SYMBOL(__getnstimeofday);

/**
 * getnstimeofday - Returns the time of day in a timespec.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec (WARN if suspended).
 */
void getnstimeofday(struct timespec *ts)
{
        WARN_ON(__getnstimeofday(ts));
}
EXPORT_SYMBOL(getnstimeofday);
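
/*
 * Sketch of typical usage from driver code (hypothetical caller, shown
 * only for illustration):
 *
 *      struct timespec ts;
 *
 *      getnstimeofday(&ts);
 *      pr_info("wall time: %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * Callers that need to detect the suspended case should use
 * __getnstimeofday() and check its return value instead.
 */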

ktime_t ktime_get(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
                nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;

        } while (read_seqretry(&tk->lock, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_sec += tomono.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:     pointer to the timespec to be set to raw monotonic time
 * @ts_real:    pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs_raw, nsecs_real;

        WARN_ON_ONCE(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);

                *ts_raw = tk->raw_time;
                ts_real->tv_sec = tk->xtime_sec;
                ts_real->tv_nsec = 0;

                nsecs_raw = timekeeping_get_ns_raw(tk);
                nsecs_real = timekeeping_get_ns(tk);

        } while (read_seqretry(&tk->lock, seq));

        timespec_add_ns(ts_raw, nsecs_raw);
        timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:         pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:         pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec ts_delta, xt;
        unsigned long flags;

        if (!timespec_valid_strict(tv))
                return -EINVAL;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);

        xt = tk_xtime(tk);
        ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));

        tk_set_xtime(tk, tv);

        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);
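
/*
 * Sketch of a typical caller (illustrative only; rtc_secs is a
 * placeholder for a value read from an RTC elsewhere):
 *
 *      struct timespec ts = { .tv_sec = rtc_secs, .tv_nsec = 0 };
 *
 *      if (do_settimeofday(&ts) < 0)
 *              pr_warn("invalid time, clock not set\n");
 *
 * The timespec must pass timespec_valid_strict(): a non-negative
 * tv_sec that fits in a ktime_t, and tv_nsec in [0, NSEC_PER_SEC).
 */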

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:         pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec tmp;
        int ret = 0;

        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);

        /* Make sure the proposed value is valid */
        tmp = timespec_add(tk_xtime(tk), *ts);
        if (!timespec_valid_strict(&tmp)) {
                ret = -EINVAL;
                goto error;
        }

        tk_xtime_add(tk, ts);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *new, *old;
        unsigned long flags;

        new = (struct clocksource *) data;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);
        if (!new->enable || new->enable(new) == 0) {
                old = tk->clock;
                tk_setup_internals(tk, new);
                if (old->disable)
                        old->disable(old);
        }
        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:              pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
        struct timekeeper *tk = &timekeeper;

        if (tk->clock == clock)
                return;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
}

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&tk->lock);
                nsecs = timekeeping_get_ns_raw(tk);
                *ts = tk->raw_time;

        } while (read_seqretry(&tk->lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&tk->lock);

                ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&tk->lock, seq));

        return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        u64 ret;

        do {
                seq = read_seqbegin(&tk->lock);

                ret = tk->clock->max_idle_ns;

        } while (read_seqretry(&tk->lock, seq));

        return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct timekeeper *tk = &timekeeper;
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot, tmp;

        read_persistent_clock(&now);

        if (!timespec_valid_strict(&now)) {
                pr_warn("WARNING: Persistent clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                now.tv_sec = 0;
                now.tv_nsec = 0;
        } else if (now.tv_sec || now.tv_nsec)
                persistent_clock_exist = true;

        read_boot_clock(&boot);
        if (!timespec_valid_strict(&boot)) {
                pr_warn("WARNING: Boot clock returned invalid value!\n"
                        "         Check your CMOS/BIOS settings.\n");
                boot.tv_sec = 0;
                boot.tv_nsec = 0;
        }

        seqlock_init(&tk->lock);

        ntp_init();

        write_seqlock_irqsave(&tk->lock, flags);
        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        tk_setup_internals(tk, clock);

        tk_set_xtime(tk, &now);
        tk->raw_time.tv_sec = 0;
        tk->raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0)
                boot = tk_xtime(tk);

        set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
        tk_set_wall_to_mono(tk, tmp);

        tmp.tv_sec = 0;
        tmp.tv_nsec = 0;
        tk_set_sleep_time(tk, tmp);

        write_sequnlock_irqrestore(&tk->lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                                                        struct timespec *delta)
{
        if (!timespec_valid_strict(delta)) {
                printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
                                        "sleep delta value!\n");
                return;
        }
        tk_xtime_add(tk, delta);
        tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
        tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;

        /*
         * Make sure we don't set the clock twice, as timekeeping_resume()
         * already did it
         */
        if (has_persistent_clock())
                return;

        write_seqlock_irqsave(&tk->lock, flags);

        timekeeping_forward_now(tk);

        __timekeeping_inject_sleeptime(tk, delta);

        timekeeping_update(tk, true);

        write_sequnlock_irqrestore(&tk->lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec ts;

        read_persistent_clock(&ts);

        clockevents_resume();
        clocksource_resume();

        write_seqlock_irqsave(&tk->lock, flags);

        if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                ts = timespec_sub(ts, timekeeping_suspend_time);
                __timekeeping_inject_sleeptime(tk, &ts);
        }
        /* re-base the last cycle value */
        tk->clock->cycle_last = tk->clock->read(tk->clock);
        tk->ntp_error = 0;
        timekeeping_suspended = 0;
        timekeeping_update(tk, false);
        write_sequnlock_irqrestore(&tk->lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hrtimers_resume();
}

static int timekeeping_suspend(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long flags;
        struct timespec         delta, delta_delta;
        static struct timespec  old_delta;

        read_persistent_clock(&timekeeping_suspend_time);

        write_seqlock_irqsave(&tk->lock, flags);
        timekeeping_forward_now(tk);
        timekeeping_suspended = 1;

        /*
         * To avoid drift caused by repeated suspend/resumes,
         * which each can add ~1 second drift error,
         * try to compensate so the difference in system time
         * and persistent_clock time stays close to constant.
         */
        delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
        delta_delta = timespec_sub(delta, old_delta);
        if (abs(delta_delta.tv_sec) >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
                 */
                old_delta = delta;
        } else {
                /* Otherwise try to adjust old_system to compensate */
                timekeeping_suspend_time =
                        timespec_add(timekeeping_suspend_time, delta_delta);
        }
        write_sequnlock_irqrestore(&tk->lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        clocksource_suspend();
        clockevents_suspend();

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
        register_syscore_ops(&timekeeping_syscore_ops);
        return 0;
}

device_initcall(timekeeping_init_ops);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
                                                 s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
        tick_error -= tk->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value.  */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}
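
/*
 * Worked example with illustrative numbers: suppose the scaled error
 * comes out at five times *interval and is positive. The final loop
 * halves error until it no longer exceeds the interval, taking three
 * iterations (5 -> 2.5 -> 1.25 -> 0.625 intervals, rounding down), so
 * adj = 3. *interval and *offset are scaled by 1 << 3 and the function
 * returns mult << adj = 8, i.e. the caller bumps the multiplier by
 * eight steps at once instead of creeping along at +/-1 per tick.
 */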

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
        s64 error, interval = tk->cycle_interval;
        int adj;

        /*
         * The point of this is to check if the error is greater than half
         * an interval.
         *
         * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
         *
         * Note we subtract one in the shift, so that error is really error*2.
         * This "saves" dividing(shifting) interval twice, but keeps the
         * (error > interval) comparison as still measuring if error is
         * larger than half an interval.
         *
         * Note: It does not "save" on aggravation when reading the code.
         */
        error = tk->ntp_error >> (tk->ntp_error_shift - 1);
        if (error > interval) {
                /*
                 * We now divide error by 4 (via shift), which checks if
                 * the error is greater than twice the interval.
                 * If it is greater, we need a bigadjust, if it's smaller,
                 * we can adjust by 1.
                 */
                error >>= 2;
                /*
                 * XXX - In update_wall_time, we round up to the next
                 * nanosecond, and store the amount rounded up into
                 * the error. This causes the likely below to be unlikely.
                 *
                 * The proper fix is to avoid rounding up by using
                 * the high precision tk->xtime_nsec instead of
                 * xtime.tv_nsec everywhere. Fixing this will take some
                 * time.
                 */
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(tk, error, &interval, &offset);
        } else {
                if (error < -interval) {
                        /* See comment above, this is just switched for the negative */
                        error >>= 2;
                        if (likely(error >= -interval)) {
                                adj = -1;
                                interval = -interval;
                                offset = -offset;
                        } else {
                                adj = timekeeping_bigadjust(tk, error, &interval, &offset);
                        }
                } else {
                        goto out_adjust;
                }
        }

        if (unlikely(tk->clock->maxadj &&
                (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
                printk_once(KERN_WARNING
                        "Adjusting %s more than 11%% (%ld vs %ld)\n",
                        tk->clock->name, (long)tk->mult + adj,
                        (long)tk->clock->mult + tk->clock->maxadj);
        }
        /*
         * So the following can be confusing.
         *
         * To keep things simple, let's assume adj == 1 for now.
         *
         * When adj != 1, remember that the interval and offset values
         * have been appropriately scaled so the math is the same.
         *
         * The basic idea here is that we're increasing the multiplier
         * by one, this causes the xtime_interval to be incremented by
         * one cycle_interval. This is because:
         *      xtime_interval = cycle_interval * mult
         * So if mult is being incremented by one:
         *      xtime_interval = cycle_interval * (mult + 1)
         * It's the same as:
         *      xtime_interval = (cycle_interval * mult) + cycle_interval
         * Which can be shortened to:
         *      xtime_interval += cycle_interval
         *
         * So offset stores the non-accumulated cycles. Thus the current
         * time (in shifted nanoseconds) is:
         *      now = (offset * adj) + xtime_nsec
         * Now, even though we're adjusting the clock frequency, we have
         * to keep time consistent. In other words, we can't jump back
         * in time, and we also want to avoid jumping forward in time.
         *
         * So given the same offset value, we need the time to be the same
         * both before and after the freq adjustment.
         *      now = (offset * adj_1) + xtime_nsec_1
         *      now = (offset * adj_2) + xtime_nsec_2
         * So:
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * adj_2) + xtime_nsec_2
         * And we know:
         *      adj_2 = adj_1 + 1
         * So:
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * (adj_1+1)) + xtime_nsec_2
         *      (offset * adj_1) + xtime_nsec_1 =
         *              (offset * adj_1) + offset + xtime_nsec_2
         * Canceling the sides:
         *      xtime_nsec_1 = offset + xtime_nsec_2
         * Which gives us:
         *      xtime_nsec_2 = xtime_nsec_1 - offset
         * Which simplifies to:
         *      xtime_nsec -= offset
         *
         * XXX - TODO: Doc ntp_error calculation.
         */
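        /*
         * Numeric illustration of the xtime_nsec -= offset step (made-up
         * values): with offset = 1000 unaccumulated cycles, mult = 10 and
         * xtime_nsec = 5000 shifted-ns, time reads 1000 * 10 + 5000 =
         * 15000. After bumping mult to 11, subtracting offset gives
         * 1000 * 11 + 4000 = 15000 again, so the frequency change does
         * not step the clock.
         */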
        tk->mult += adj;
        tk->xtime_interval += interval;
        tk->xtime_nsec -= offset;
        tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;

out_adjust:
        /*
         * It may be possible that when we entered this function, xtime_nsec
         * was very small.  Further, if we're slightly speeding the clocksource
         * in the code above, it's possible the required corrective factor to
         * xtime_nsec could cause it to underflow.
         *
         * Now, since we already accumulated the second, we cannot simply roll
         * the accumulated second back, since the NTP subsystem has been
         * notified via second_overflow. So instead we push xtime_nsec forward
         * by the amount we underflowed, and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)tk->xtime_nsec < 0)) {
                s64 neg = -(s64)tk->xtime_nsec;
                tk->xtime_nsec = 0;
                tk->ntp_error += neg << tk->ntp_error_shift;
        }

}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
        u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

        while (tk->xtime_nsec >= nsecps) {
                int leap;

                tk->xtime_nsec -= nsecps;
                tk->xtime_sec++;

                /* Figure out if it's a leap sec and apply if needed */
                leap = second_overflow(tk->xtime_sec);
                if (unlikely(leap)) {
                        struct timespec ts;

                        tk->xtime_sec += leap;

                        ts.tv_sec = leap;
                        ts.tv_nsec = 0;
                        tk_set_wall_to_mono(tk,
                                timespec_sub(tk->wall_to_monotonic, ts));

                        clock_was_set_delayed();
                }
        }
}
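
/*
 * Leap second walk-through (illustrative): when second_overflow()
 * reports leap = -1 at the end of a UTC day, xtime_sec is pulled back
 * by one while the monotonic clock keeps running, so wall_to_monotonic
 * grows by one second via tk_set_wall_to_mono() and hrtimers are told
 * to re-evaluate their bases through clock_was_set_delayed().
 */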

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
                                                u32 shift)
{
        u64 raw_nsecs;

        /* If the offset is smaller than a shifted interval, do nothing */
        if (offset < tk->cycle_interval<<shift)
                return offset;

        /* Accumulate one shifted interval */
        offset -= tk->cycle_interval << shift;
        tk->clock->cycle_last += tk->cycle_interval << shift;

        tk->xtime_nsec += tk->xtime_interval << shift;
        accumulate_nsecs_to_secs(tk);

        /* Accumulate raw time */
        raw_nsecs = (u64)tk->raw_interval << shift;
        raw_nsecs += tk->raw_time.tv_nsec;
        if (raw_nsecs >= NSEC_PER_SEC) {
                u64 raw_secs = raw_nsecs;
                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
                tk->raw_time.tv_sec += raw_secs;
        }
        tk->raw_time.tv_nsec = raw_nsecs;

        /* Accumulate error between NTP and clock interval */
        tk->ntp_error += ntp_tick_length() << shift;
        tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
                                                (tk->ntp_error_shift + shift);

        return offset;
}
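
/*
 * Example of a single call (illustrative): with shift = 3, one
 * invocation consumes 8 cycle_intervals at once (cycle_last advances by
 * cycle_interval << 3, xtime_nsec grows by xtime_interval << 3, raw
 * time by raw_interval << 3) and returns the reduced offset. The caller
 * retries with progressively smaller shifts until everything pending
 * has been consumed.
 */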

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
        s64 remainder;

        /*
         * Store only full nanoseconds into xtime_nsec after rounding
         * it up and add the remainder to the error difference.
         * XXX - This is necessary to avoid small 1ns inconsistencies caused
         * by truncating the remainder in vsyscalls. However, it causes
         * additional work to be done in timekeeping_adjust(). Once
         * the vsyscall implementations are converted to use xtime_nsec
         * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
         * users are removed, this can be killed.
         */
        remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
        tk->xtime_nsec -= remainder;
        tk->xtime_nsec += 1ULL << tk->shift;
        tk->ntp_error += remainder << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif


/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
static void update_wall_time(void)
{
        struct clocksource *clock;
        struct timekeeper *tk = &timekeeper;
        cycle_t offset;
        int shift = 0, maxshift;
        unsigned long flags;

        write_seqlock_irqsave(&tk->lock, flags);

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                goto out;

        clock = tk->clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
        offset = tk->cycle_interval;
#else
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

        /* Check if there's really nothing to do */
        if (offset < tk->cycle_interval)
                goto out;

        /*
         * With NO_HZ we may have to accumulate many cycle_intervals
         * (think "ticks") worth of time at once. To do this efficiently,
         * we calculate the largest doubling multiple of cycle_intervals
         * that is smaller than the offset.  We then accumulate that
         * chunk in one go, and then try to consume the next smaller
         * doubled multiple.
         */
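        /*
         * Example (illustrative numbers): with 37 cycle_intervals
         * pending, ilog2(37) - ilog2(1 interval) gives shift = 5, so the
         * loop below consumes chunks of 32, then 4, then 1 intervals
         * (the in-between shift values fall through as no-ops), i.e.
         * three accumulation steps instead of 37 single-tick ones.
         */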
        shift = ilog2(offset) - ilog2(tk->cycle_interval);
        shift = max(0, shift);
        /* Bound shift to one less than what overflows tick_length */
        maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
        shift = min(shift, maxshift);
        while (offset >= tk->cycle_interval) {
                offset = logarithmic_accumulation(tk, offset, shift);
                if (offset < tk->cycle_interval<<shift)
                        shift--;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(tk, offset);

        /*
         * XXX This can be killed once everyone converts
         * to the new update_vsyscall.
         */
        old_vsyscall_fixup(tk);

        /*
         * Finally, make sure that after the rounding
         * xtime_nsec isn't larger than NSEC_PER_SEC
         */
        accumulate_nsecs_to_secs(tk);

        timekeeping_update(tk, false);

out:
        write_sequnlock_irqrestore(&tk->lock, flags);

}

/**
 * getboottime - Return the real time of system boot.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec boottime = {
                .tv_sec = tk->wall_to_monotonic.tv_sec +
                                tk->total_sleep_time.tv_sec,
                .tv_nsec = tk->wall_to_monotonic.tv_nsec +
                                tk->total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);

/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:         pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec tomono, sleep;
        s64 nsec;
        unsigned int seq;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&tk->lock);
                ts->tv_sec = tk->xtime_sec;
                nsec = timekeeping_get_ns(tk);
                tomono = tk->wall_to_monotonic;
                sleep = tk->total_sleep_time;

        } while (read_seqretry(&tk->lock, seq));

        ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
        ts->tv_nsec = 0;
        timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);

/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
        struct timespec ts;

        get_monotonic_boottime(&ts);
        return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:         pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        struct timekeeper *tk = &timekeeper;

        *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);

unsigned long get_seconds(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;

        return tk_xtime(tk);
}

struct timespec current_kernel_time(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);

                now = tk_xtime(tk);
        } while (read_seqretry(&tk->lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timekeeper *tk = &timekeeper;
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);

                now = tk_xtime(tk);
                mono = tk->wall_to_monotonic;
        } while (read_seqretry(&tk->lock, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
        jiffies_64 += ticks;
        update_wall_time();
        calc_global_load(ticks);
}

/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:       pointer to timespec to be set with xtime
 * @wtom:       pointer to timespec to be set with wall_to_monotonic
 * @sleep:      pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
                                struct timespec *wtom, struct timespec *sleep)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;

        do {
                seq = read_seqbegin(&tk->lock);
                *xtim = tk_xtime(tk);
                *wtom = tk->wall_to_monotonic;
                *sleep = tk->total_sleep_time;
        } while (read_seqretry(&tk->lock, seq));
}

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:  pointer to storage for monotonic -> realtime offset
 * @offs_boot:  pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
        struct timekeeper *tk = &timekeeper;
        ktime_t now;
        unsigned int seq;
        u64 secs, nsecs;

        do {
                seq = read_seqbegin(&tk->lock);

                secs = tk->xtime_sec;
                nsecs = timekeeping_get_ns(tk);

                *offs_real = tk->offs_real;
                *offs_boot = tk->offs_boot;
        } while (read_seqretry(&tk->lock, seq));

        now = ktime_add_ns(ktime_set(secs, 0), nsecs);
        now = ktime_sub(now, *offs_real);
        return now;
}
#endif

/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
        struct timekeeper *tk = &timekeeper;
        unsigned long seq;
        struct timespec wtom;

        do {
                seq = read_seqbegin(&tk->lock);
                wtom = tk->wall_to_monotonic;
        } while (read_seqretry(&tk->lock, seq));

        return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:      number of ticks that have elapsed since the last call
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
        write_seqlock(&jiffies_lock);
        do_timer(ticks);
        write_sequnlock(&jiffies_lock);
}