linux/kernel/timer.c
   1/*
   2 *  linux/kernel/timer.c
   3 *
   4 *  Kernel internal timers, basic process system calls
   5 *
   6 *  Copyright (C) 1991, 1992  Linus Torvalds
   7 *
   8 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
   9 *
  10 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
  11 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
  12 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
  13 *              serialize accesses to xtime/lost_ticks).
  14 *                              Copyright (C) 1998  Andrea Arcangeli
  15 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
  16 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
  17 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
  18 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
  19 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
  20 */
  21
  22#include <linux/kernel_stat.h>
  23#include <linux/export.h>
  24#include <linux/interrupt.h>
  25#include <linux/percpu.h>
  26#include <linux/init.h>
  27#include <linux/mm.h>
  28#include <linux/swap.h>
  29#include <linux/pid_namespace.h>
  30#include <linux/notifier.h>
  31#include <linux/thread_info.h>
  32#include <linux/time.h>
  33#include <linux/jiffies.h>
  34#include <linux/posix-timers.h>
  35#include <linux/cpu.h>
  36#include <linux/syscalls.h>
  37#include <linux/delay.h>
  38#include <linux/tick.h>
  39#include <linux/kallsyms.h>
  40#include <linux/irq_work.h>
  41#include <linux/sched.h>
  42#include <linux/slab.h>
  43
  44#include <asm/uaccess.h>
  45#include <asm/unistd.h>
  46#include <asm/div64.h>
  47#include <asm/timex.h>
  48#include <asm/io.h>
  49
  50#define CREATE_TRACE_POINTS
  51#include <trace/events/timer.h>
  52
  53u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
  54
  55EXPORT_SYMBOL(jiffies_64);
  56
  57/*
  58 * per-CPU timer vector definitions:
  59 */
  60#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
  61#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
  62#define TVN_SIZE (1 << TVN_BITS)
  63#define TVR_SIZE (1 << TVR_BITS)
  64#define TVN_MASK (TVN_SIZE - 1)
  65#define TVR_MASK (TVR_SIZE - 1)
  66
  67struct tvec {
  68        struct list_head vec[TVN_SIZE];
  69};
  70
  71struct tvec_root {
  72        struct list_head vec[TVR_SIZE];
  73};
  74
  75struct tvec_base {
  76        spinlock_t lock;
  77        struct timer_list *running_timer;
  78        unsigned long timer_jiffies;
  79        unsigned long next_timer;
  80        unsigned long active_timers;
  81        struct tvec_root tv1;
  82        struct tvec tv2;
  83        struct tvec tv3;
  84        struct tvec tv4;
  85        struct tvec tv5;
  86} ____cacheline_aligned;
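/*
 * Editor's note (not in the original file): with CONFIG_BASE_SMALL=0 the
 * wheel levels above cover the following ranges of "jiffies from now"
 * (idx = expires - base->timer_jiffies):
 *
 *   tv1:            idx < 2^8          (256 slots, 1 jiffy each)
 *   tv2:   2^8  <=  idx < 2^14         (64 slots, 2^8 jiffies each)
 *   tv3:   2^14 <=  idx < 2^20         (64 slots, 2^14 jiffies each)
 *   tv4:   2^20 <=  idx < 2^26         (64 slots, 2^20 jiffies each)
 *   tv5:   2^26 <=  idx <= 0xffffffff  (64 slots, 2^26 jiffies each)
 *
 * At HZ=1000, tv1 therefore spans ~0.25 s and tv5 reaches out to ~49 days.
 */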
  87
  88struct tvec_base boot_tvec_bases;
  89EXPORT_SYMBOL(boot_tvec_bases);
  90static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
  91
  92/* Functions below help us manage 'deferrable' flag */
  93static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
  94{
  95        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
  96}
  97
  98static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
  99{
 100        return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
 101}
 102
 103static inline void timer_set_deferrable(struct timer_list *timer)
 104{
 105        timer->base = TBASE_MAKE_DEFERRED(timer->base);
 106}
 107
 108static inline void
 109timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
 110{
 111        timer->base = (struct tvec_base *)((unsigned long)(new_base) |
 112                                      tbase_get_deferrable(timer->base));
 113}
 114
 115static unsigned long round_jiffies_common(unsigned long j, int cpu,
 116                bool force_up)
 117{
 118        int rem;
 119        unsigned long original = j;
 120
 121        /*
  122         * We don't want all cpus firing their timers at once hitting the
  123         * same lock or cachelines, so we skew each extra cpu with an extra
  124         * 3 jiffies. This 3-jiffy skew came originally from the mm/ code,
  125         * which already did this.
  126         * The skew is done by adding 3*cpunr, then rounding, then subtracting
  127         * this extra offset again.
 128         */
 129        j += cpu * 3;
 130
 131        rem = j % HZ;
 132
 133        /*
  134         * If the target jiffy is just after a whole second (which can happen
  135         * due to delays of the timer irq, long irq-off times, etc.) then
  136         * we should round down to the whole second, not up. Use 1/4th of a
  137         * second as the cutoff for this rounding, as an extreme upper bound.
 138         * But never round down if @force_up is set.
 139         */
 140        if (rem < HZ/4 && !force_up) /* round down */
 141                j = j - rem;
 142        else /* round up */
 143                j = j - rem + HZ;
 144
 145        /* now that we have rounded, subtract the extra skew again */
 146        j -= cpu * 3;
 147
 148        if (j <= jiffies) /* rounding ate our timeout entirely; */
 149                return original;
 150        return j;
 151}
 152
 153/**
 154 * __round_jiffies - function to round jiffies to a full second
 155 * @j: the time in (absolute) jiffies that should be rounded
 156 * @cpu: the processor number on which the timeout will happen
 157 *
 158 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 159 * up or down to (approximately) full seconds. This is useful for timers
 160 * for which the exact time they fire does not matter too much, as long as
 161 * they fire approximately every X seconds.
 162 *
 163 * By rounding these timers to whole seconds, all such timers will fire
 164 * at the same time, rather than at various times spread out. The goal
 165 * of this is to have the CPU wake up less, which saves power.
 166 *
 167 * The exact rounding is skewed for each processor to avoid all
 168 * processors firing at the exact same time, which could lead
 169 * to lock contention or spurious cache line bouncing.
 170 *
 171 * The return value is the rounded version of the @j parameter.
 172 */
 173unsigned long __round_jiffies(unsigned long j, int cpu)
 174{
 175        return round_jiffies_common(j, cpu, false);
 176}
 177EXPORT_SYMBOL_GPL(__round_jiffies);
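/*
 * Editor's usage sketch (not part of the original file, not compiled): how a
 * driver might arm a periodic housekeeping timer on whole-second boundaries
 * so that such timers coalesce. "my_watchdog_timer" and the rearm helper are
 * hypothetical; only round_jiffies()/mod_timer() are real APIs.
 */
#if 0
static struct timer_list my_watchdog_timer;	/* hypothetical timer */

static void my_watchdog_rearm(void)
{
	/* fire roughly once per second, rounded to a full second */
	mod_timer(&my_watchdog_timer, round_jiffies(jiffies + HZ));
}
#endif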
 178
 179/**
 180 * __round_jiffies_relative - function to round jiffies to a full second
 181 * @j: the time in (relative) jiffies that should be rounded
 182 * @cpu: the processor number on which the timeout will happen
 183 *
  184 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 185 * up or down to (approximately) full seconds. This is useful for timers
 186 * for which the exact time they fire does not matter too much, as long as
 187 * they fire approximately every X seconds.
 188 *
 189 * By rounding these timers to whole seconds, all such timers will fire
 190 * at the same time, rather than at various times spread out. The goal
 191 * of this is to have the CPU wake up less, which saves power.
 192 *
 193 * The exact rounding is skewed for each processor to avoid all
 194 * processors firing at the exact same time, which could lead
 195 * to lock contention or spurious cache line bouncing.
 196 *
 197 * The return value is the rounded version of the @j parameter.
 198 */
 199unsigned long __round_jiffies_relative(unsigned long j, int cpu)
 200{
 201        unsigned long j0 = jiffies;
 202
 203        /* Use j0 because jiffies might change while we run */
 204        return round_jiffies_common(j + j0, cpu, false) - j0;
 205}
 206EXPORT_SYMBOL_GPL(__round_jiffies_relative);
 207
 208/**
 209 * round_jiffies - function to round jiffies to a full second
 210 * @j: the time in (absolute) jiffies that should be rounded
 211 *
 212 * round_jiffies() rounds an absolute time in the future (in jiffies)
 213 * up or down to (approximately) full seconds. This is useful for timers
 214 * for which the exact time they fire does not matter too much, as long as
 215 * they fire approximately every X seconds.
 216 *
 217 * By rounding these timers to whole seconds, all such timers will fire
 218 * at the same time, rather than at various times spread out. The goal
 219 * of this is to have the CPU wake up less, which saves power.
 220 *
 221 * The return value is the rounded version of the @j parameter.
 222 */
 223unsigned long round_jiffies(unsigned long j)
 224{
 225        return round_jiffies_common(j, raw_smp_processor_id(), false);
 226}
 227EXPORT_SYMBOL_GPL(round_jiffies);
 228
 229/**
 230 * round_jiffies_relative - function to round jiffies to a full second
 231 * @j: the time in (relative) jiffies that should be rounded
 232 *
  233 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 234 * up or down to (approximately) full seconds. This is useful for timers
 235 * for which the exact time they fire does not matter too much, as long as
 236 * they fire approximately every X seconds.
 237 *
 238 * By rounding these timers to whole seconds, all such timers will fire
 239 * at the same time, rather than at various times spread out. The goal
 240 * of this is to have the CPU wake up less, which saves power.
 241 *
 242 * The return value is the rounded version of the @j parameter.
 243 */
 244unsigned long round_jiffies_relative(unsigned long j)
 245{
 246        return __round_jiffies_relative(j, raw_smp_processor_id());
 247}
 248EXPORT_SYMBOL_GPL(round_jiffies_relative);
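/*
 * Editor's usage sketch (not part of the original file, not compiled): the
 * relative variant is handy when the caller thinks in delays rather than
 * absolute times. "my_poll_timer" is hypothetical.
 */
#if 0
	/* poll roughly every 30 seconds; the exact firing time is unimportant */
	mod_timer(&my_poll_timer, jiffies + round_jiffies_relative(30 * HZ));
#endif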
 249
 250/**
 251 * __round_jiffies_up - function to round jiffies up to a full second
 252 * @j: the time in (absolute) jiffies that should be rounded
 253 * @cpu: the processor number on which the timeout will happen
 254 *
 255 * This is the same as __round_jiffies() except that it will never
 256 * round down.  This is useful for timeouts for which the exact time
 257 * of firing does not matter too much, as long as they don't fire too
 258 * early.
 259 */
 260unsigned long __round_jiffies_up(unsigned long j, int cpu)
 261{
 262        return round_jiffies_common(j, cpu, true);
 263}
 264EXPORT_SYMBOL_GPL(__round_jiffies_up);
 265
 266/**
 267 * __round_jiffies_up_relative - function to round jiffies up to a full second
 268 * @j: the time in (relative) jiffies that should be rounded
 269 * @cpu: the processor number on which the timeout will happen
 270 *
 271 * This is the same as __round_jiffies_relative() except that it will never
 272 * round down.  This is useful for timeouts for which the exact time
 273 * of firing does not matter too much, as long as they don't fire too
 274 * early.
 275 */
 276unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
 277{
 278        unsigned long j0 = jiffies;
 279
 280        /* Use j0 because jiffies might change while we run */
 281        return round_jiffies_common(j + j0, cpu, true) - j0;
 282}
 283EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
 284
 285/**
 286 * round_jiffies_up - function to round jiffies up to a full second
 287 * @j: the time in (absolute) jiffies that should be rounded
 288 *
 289 * This is the same as round_jiffies() except that it will never
 290 * round down.  This is useful for timeouts for which the exact time
 291 * of firing does not matter too much, as long as they don't fire too
 292 * early.
 293 */
 294unsigned long round_jiffies_up(unsigned long j)
 295{
 296        return round_jiffies_common(j, raw_smp_processor_id(), true);
 297}
 298EXPORT_SYMBOL_GPL(round_jiffies_up);
 299
 300/**
 301 * round_jiffies_up_relative - function to round jiffies up to a full second
 302 * @j: the time in (relative) jiffies that should be rounded
 303 *
 304 * This is the same as round_jiffies_relative() except that it will never
 305 * round down.  This is useful for timeouts for which the exact time
 306 * of firing does not matter too much, as long as they don't fire too
 307 * early.
 308 */
 309unsigned long round_jiffies_up_relative(unsigned long j)
 310{
 311        return __round_jiffies_up_relative(j, raw_smp_processor_id());
 312}
 313EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 314
 315/**
 316 * set_timer_slack - set the allowed slack for a timer
 317 * @timer: the timer to be modified
 318 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 319 *
 320 * Set the amount of time, in jiffies, that a certain timer has
 321 * in terms of slack. By setting this value, the timer subsystem
 322 * will schedule the actual timer somewhere between
 323 * the time mod_timer() asks for, and that time plus the slack.
 324 *
 325 * By setting the slack to -1, a percentage of the delay is used
 326 * instead.
 327 */
 328void set_timer_slack(struct timer_list *timer, int slack_hz)
 329{
 330        timer->slack = slack_hz;
 331}
 332EXPORT_SYMBOL_GPL(set_timer_slack);
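/*
 * Editor's usage sketch (not part of the original file, not compiled): grant
 * the timer core up to one second of slack on a long timeout so it can be
 * batched with neighboring timers. "my_timer" is hypothetical.
 */
#if 0
	set_timer_slack(&my_timer, HZ);			/* allow up to 1 s of rounding */
	mod_timer(&my_timer, jiffies + 60 * HZ);	/* nominal 60 s timeout */
#endif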
 333
 334static void
 335__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 336{
 337        unsigned long expires = timer->expires;
 338        unsigned long idx = expires - base->timer_jiffies;
 339        struct list_head *vec;
 340
 341        if (idx < TVR_SIZE) {
 342                int i = expires & TVR_MASK;
 343                vec = base->tv1.vec + i;
 344        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
 345                int i = (expires >> TVR_BITS) & TVN_MASK;
 346                vec = base->tv2.vec + i;
 347        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
 348                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
 349                vec = base->tv3.vec + i;
 350        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
 351                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
 352                vec = base->tv4.vec + i;
 353        } else if ((signed long) idx < 0) {
 354                /*
 355                 * Can happen if you add a timer with expires == jiffies,
 356                 * or you set a timer to go off in the past
 357                 */
 358                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
 359        } else {
 360                int i;
 361                /* If the timeout is larger than 0xffffffff on 64-bit
 362                 * architectures then we use the maximum timeout:
 363                 */
 364                if (idx > 0xffffffffUL) {
 365                        idx = 0xffffffffUL;
 366                        expires = idx + base->timer_jiffies;
 367                }
 368                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
 369                vec = base->tv5.vec + i;
 370        }
 371        /*
 372         * Timers are FIFO:
 373         */
 374        list_add_tail(&timer->entry, vec);
 375}
 376
 377static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 378{
 379        __internal_add_timer(base, timer);
 380        /*
 381         * Update base->active_timers and base->next_timer
 382         */
 383        if (!tbase_get_deferrable(timer->base)) {
 384                if (time_before(timer->expires, base->next_timer))
 385                        base->next_timer = timer->expires;
 386                base->active_timers++;
 387        }
 388}
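/*
 * Editor's note (not in the original file): a worked example of the bucket
 * selection above, assuming base->timer_jiffies == 0 and HZ=1000. A timer
 * due in 5 minutes has expires = idx = 300000. Since 2^14 <= 300000 < 2^20
 * it lands in tv3, in slot (300000 >> 14) & 63 = 18. It is later cascaded
 * down into tv2 and finally tv1 as base->timer_jiffies catches up.
 */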
 389
 390#ifdef CONFIG_TIMER_STATS
 391void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 392{
 393        if (timer->start_site)
 394                return;
 395
 396        timer->start_site = addr;
 397        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
 398        timer->start_pid = current->pid;
 399}
 400
 401static void timer_stats_account_timer(struct timer_list *timer)
 402{
 403        unsigned int flag = 0;
 404
 405        if (likely(!timer->start_site))
 406                return;
 407        if (unlikely(tbase_get_deferrable(timer->base)))
 408                flag |= TIMER_STATS_FLAG_DEFERRABLE;
 409
 410        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
 411                                 timer->function, timer->start_comm, flag);
 412}
 413
 414#else
 415static void timer_stats_account_timer(struct timer_list *timer) {}
 416#endif
 417
 418#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
 419
 420static struct debug_obj_descr timer_debug_descr;
 421
 422static void *timer_debug_hint(void *addr)
 423{
 424        return ((struct timer_list *) addr)->function;
 425}
 426
 427/*
 428 * fixup_init is called when:
 429 * - an active object is initialized
 430 */
 431static int timer_fixup_init(void *addr, enum debug_obj_state state)
 432{
 433        struct timer_list *timer = addr;
 434
 435        switch (state) {
 436        case ODEBUG_STATE_ACTIVE:
 437                del_timer_sync(timer);
 438                debug_object_init(timer, &timer_debug_descr);
 439                return 1;
 440        default:
 441                return 0;
 442        }
 443}
 444
 445/* Stub timer callback for improperly used timers. */
 446static void stub_timer(unsigned long data)
 447{
 448        WARN_ON(1);
 449}
 450
 451/*
 452 * fixup_activate is called when:
 453 * - an active object is activated
 454 * - an unknown object is activated (might be a statically initialized object)
 455 */
 456static int timer_fixup_activate(void *addr, enum debug_obj_state state)
 457{
 458        struct timer_list *timer = addr;
 459
 460        switch (state) {
 461
 462        case ODEBUG_STATE_NOTAVAILABLE:
 463                /*
 464                 * This is not really a fixup. The timer was
 465                 * statically initialized. We just make sure that it
 466                 * is tracked in the object tracker.
 467                 */
 468                if (timer->entry.next == NULL &&
 469                    timer->entry.prev == TIMER_ENTRY_STATIC) {
 470                        debug_object_init(timer, &timer_debug_descr);
 471                        debug_object_activate(timer, &timer_debug_descr);
 472                        return 0;
 473                } else {
 474                        setup_timer(timer, stub_timer, 0);
 475                        return 1;
 476                }
 477                return 0;
 478
 479        case ODEBUG_STATE_ACTIVE:
 480                WARN_ON(1);
 481
 482        default:
 483                return 0;
 484        }
 485}
 486
 487/*
 488 * fixup_free is called when:
 489 * - an active object is freed
 490 */
 491static int timer_fixup_free(void *addr, enum debug_obj_state state)
 492{
 493        struct timer_list *timer = addr;
 494
 495        switch (state) {
 496        case ODEBUG_STATE_ACTIVE:
 497                del_timer_sync(timer);
 498                debug_object_free(timer, &timer_debug_descr);
 499                return 1;
 500        default:
 501                return 0;
 502        }
 503}
 504
 505/*
 506 * fixup_assert_init is called when:
 507 * - an untracked/uninit-ed object is found
 508 */
 509static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 510{
 511        struct timer_list *timer = addr;
 512
 513        switch (state) {
 514        case ODEBUG_STATE_NOTAVAILABLE:
 515                if (timer->entry.prev == TIMER_ENTRY_STATIC) {
 516                        /*
 517                         * This is not really a fixup. The timer was
 518                         * statically initialized. We just make sure that it
 519                         * is tracked in the object tracker.
 520                         */
 521                        debug_object_init(timer, &timer_debug_descr);
 522                        return 0;
 523                } else {
 524                        setup_timer(timer, stub_timer, 0);
 525                        return 1;
 526                }
 527        default:
 528                return 0;
 529        }
 530}
 531
 532static struct debug_obj_descr timer_debug_descr = {
 533        .name                   = "timer_list",
 534        .debug_hint             = timer_debug_hint,
 535        .fixup_init             = timer_fixup_init,
 536        .fixup_activate         = timer_fixup_activate,
 537        .fixup_free             = timer_fixup_free,
 538        .fixup_assert_init      = timer_fixup_assert_init,
 539};
 540
 541static inline void debug_timer_init(struct timer_list *timer)
 542{
 543        debug_object_init(timer, &timer_debug_descr);
 544}
 545
 546static inline void debug_timer_activate(struct timer_list *timer)
 547{
 548        debug_object_activate(timer, &timer_debug_descr);
 549}
 550
 551static inline void debug_timer_deactivate(struct timer_list *timer)
 552{
 553        debug_object_deactivate(timer, &timer_debug_descr);
 554}
 555
 556static inline void debug_timer_free(struct timer_list *timer)
 557{
 558        debug_object_free(timer, &timer_debug_descr);
 559}
 560
 561static inline void debug_timer_assert_init(struct timer_list *timer)
 562{
 563        debug_object_assert_init(timer, &timer_debug_descr);
 564}
 565
 566static void __init_timer(struct timer_list *timer,
 567                         const char *name,
 568                         struct lock_class_key *key);
 569
 570void init_timer_on_stack_key(struct timer_list *timer,
 571                             const char *name,
 572                             struct lock_class_key *key)
 573{
 574        debug_object_init_on_stack(timer, &timer_debug_descr);
 575        __init_timer(timer, name, key);
 576}
 577EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 578
 579void destroy_timer_on_stack(struct timer_list *timer)
 580{
 581        debug_object_free(timer, &timer_debug_descr);
 582}
 583EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
 584
 585#else
 586static inline void debug_timer_init(struct timer_list *timer) { }
 587static inline void debug_timer_activate(struct timer_list *timer) { }
 588static inline void debug_timer_deactivate(struct timer_list *timer) { }
 589static inline void debug_timer_assert_init(struct timer_list *timer) { }
 590#endif
 591
 592static inline void debug_init(struct timer_list *timer)
 593{
 594        debug_timer_init(timer);
 595        trace_timer_init(timer);
 596}
 597
 598static inline void
 599debug_activate(struct timer_list *timer, unsigned long expires)
 600{
 601        debug_timer_activate(timer);
 602        trace_timer_start(timer, expires);
 603}
 604
 605static inline void debug_deactivate(struct timer_list *timer)
 606{
 607        debug_timer_deactivate(timer);
 608        trace_timer_cancel(timer);
 609}
 610
 611static inline void debug_assert_init(struct timer_list *timer)
 612{
 613        debug_timer_assert_init(timer);
 614}
 615
 616static void __init_timer(struct timer_list *timer,
 617                         const char *name,
 618                         struct lock_class_key *key)
 619{
 620        timer->entry.next = NULL;
 621        timer->base = __raw_get_cpu_var(tvec_bases);
 622        timer->slack = -1;
 623#ifdef CONFIG_TIMER_STATS
 624        timer->start_site = NULL;
 625        timer->start_pid = -1;
 626        memset(timer->start_comm, 0, TASK_COMM_LEN);
 627#endif
 628        lockdep_init_map(&timer->lockdep_map, name, key, 0);
 629}
 630
 631void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
 632                                         const char *name,
 633                                         struct lock_class_key *key,
 634                                         void (*function)(unsigned long),
 635                                         unsigned long data)
 636{
 637        timer->function = function;
 638        timer->data = data;
 639        init_timer_on_stack_key(timer, name, key);
 640        timer_set_deferrable(timer);
 641}
 642EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);
 643
 644/**
 645 * init_timer_key - initialize a timer
 646 * @timer: the timer to be initialized
 647 * @name: name of the timer
 648 * @key: lockdep class key of the fake lock used for tracking timer
 649 *       sync lock dependencies
 650 *
  651 * init_timer_key() must be done to a timer prior to calling *any* of the
 652 * other timer functions.
 653 */
 654void init_timer_key(struct timer_list *timer,
 655                    const char *name,
 656                    struct lock_class_key *key)
 657{
 658        debug_init(timer);
 659        __init_timer(timer, name, key);
 660}
 661EXPORT_SYMBOL(init_timer_key);
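/*
 * Editor's usage sketch (not part of the original file, not compiled): the
 * common init path. setup_timer() is the usual shorthand for init_timer()
 * plus setting ->function/->data. The names below are hypothetical.
 */
#if 0
static void my_timeout_fn(unsigned long data)
{
	pr_info("timer fired, data=%lu\n", data);
}

static void my_timer_start(struct timer_list *t)
{
	setup_timer(t, my_timeout_fn, 0);
	mod_timer(t, jiffies + msecs_to_jiffies(100));
}
#endif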
 662
 663void init_timer_deferrable_key(struct timer_list *timer,
 664                               const char *name,
 665                               struct lock_class_key *key)
 666{
 667        init_timer_key(timer, name, key);
 668        timer_set_deferrable(timer);
 669}
 670EXPORT_SYMBOL(init_timer_deferrable_key);
 671
 672static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 673{
 674        struct list_head *entry = &timer->entry;
 675
 676        debug_deactivate(timer);
 677
 678        __list_del(entry->prev, entry->next);
 679        if (clear_pending)
 680                entry->next = NULL;
 681        entry->prev = LIST_POISON2;
 682}
 683
 684static inline void
 685detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 686{
 687        detach_timer(timer, true);
 688        if (!tbase_get_deferrable(timer->base))
 689                timer->base->active_timers--;
 690}
 691
 692static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
 693                             bool clear_pending)
 694{
 695        if (!timer_pending(timer))
 696                return 0;
 697
 698        detach_timer(timer, clear_pending);
 699        if (!tbase_get_deferrable(timer->base)) {
 700                timer->base->active_timers--;
 701                if (timer->expires == base->next_timer)
 702                        base->next_timer = base->timer_jiffies;
 703        }
 704        return 1;
 705}
 706
 707/*
 708 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 709 * means that all timers which are tied to this base via timer->base are
 710 * locked, and the base itself is locked too.
 711 *
 712 * So __run_timers/migrate_timers can safely modify all timers which could
 713 * be found on ->tvX lists.
 714 *
 715 * When the timer's base is locked, and the timer removed from list, it is
 716 * possible to set timer->base = NULL and drop the lock: the timer remains
 717 * locked.
 718 */
 719static struct tvec_base *lock_timer_base(struct timer_list *timer,
 720                                        unsigned long *flags)
 721        __acquires(timer->base->lock)
 722{
 723        struct tvec_base *base;
 724
 725        for (;;) {
 726                struct tvec_base *prelock_base = timer->base;
 727                base = tbase_get_base(prelock_base);
 728                if (likely(base != NULL)) {
 729                        spin_lock_irqsave(&base->lock, *flags);
 730                        if (likely(prelock_base == timer->base))
 731                                return base;
 732                        /* The timer has migrated to another CPU */
 733                        spin_unlock_irqrestore(&base->lock, *flags);
 734                }
 735                cpu_relax();
 736        }
 737}
 738
 739static inline int
 740__mod_timer(struct timer_list *timer, unsigned long expires,
 741                                                bool pending_only, int pinned)
 742{
 743        struct tvec_base *base, *new_base;
 744        unsigned long flags;
  745        int ret = 0, cpu;
 746
 747        timer_stats_timer_set_start_info(timer);
 748        BUG_ON(!timer->function);
 749
 750        base = lock_timer_base(timer, &flags);
 751
 752        ret = detach_if_pending(timer, base, false);
 753        if (!ret && pending_only)
 754                goto out_unlock;
 755
 756        debug_activate(timer, expires);
 757
 758        cpu = smp_processor_id();
 759
 760#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
 761        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
 762                cpu = get_nohz_timer_target();
 763#endif
 764        new_base = per_cpu(tvec_bases, cpu);
 765
 766        if (base != new_base) {
 767                /*
 768                 * We are trying to schedule the timer on the local CPU.
  769                 * However we can't change the timer's base while it is running,
  770                 * otherwise del_timer_sync() can't detect that the timer's
  771                 * handler has not yet finished. This also guarantees that
 772                 * the timer is serialized wrt itself.
 773                 */
 774                if (likely(base->running_timer != timer)) {
 775                        /* See the comment in lock_timer_base() */
 776                        timer_set_base(timer, NULL);
 777                        spin_unlock(&base->lock);
 778                        base = new_base;
 779                        spin_lock(&base->lock);
 780                        timer_set_base(timer, base);
 781                }
 782        }
 783
 784        timer->expires = expires;
 785        internal_add_timer(base, timer);
 786
 787out_unlock:
 788        spin_unlock_irqrestore(&base->lock, flags);
 789
 790        return ret;
 791}
 792
 793/**
 794 * mod_timer_pending - modify a pending timer's timeout
 795 * @timer: the pending timer to be modified
 796 * @expires: new timeout in jiffies
 797 *
 798 * mod_timer_pending() is the same for pending timers as mod_timer(),
 799 * but will not re-activate and modify already deleted timers.
 800 *
 801 * It is useful for unserialized use of timers.
 802 */
 803int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 804{
 805        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
 806}
 807EXPORT_SYMBOL(mod_timer_pending);
 808
 809/*
 810 * Decide where to put the timer while taking the slack into account
 811 *
 812 * Algorithm:
 813 *   1) calculate the maximum (absolute) time
 814 *   2) calculate the highest bit where the expires and new max are different
 815 *   3) use this bit to make a mask
 816 *   4) use the bitmask to round down the maximum time, so that all last
 817 *      bits are zeros
 818 */
 819static inline
 820unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 821{
 822        unsigned long expires_limit, mask;
 823        int bit;
 824
 825        if (timer->slack >= 0) {
 826                expires_limit = expires + timer->slack;
 827        } else {
 828                long delta = expires - jiffies;
 829
 830                if (delta < 256)
 831                        return expires;
 832
 833                expires_limit = expires + delta / 256;
 834        }
 835        mask = expires ^ expires_limit;
 836        if (mask == 0)
 837                return expires;
 838
 839        bit = find_last_bit(&mask, BITS_PER_LONG);
 840
 841        mask = (1 << bit) - 1;
 842
 843        expires_limit = expires_limit & ~(mask);
 844
 845        return expires_limit;
 846}
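/*
 * Editor's note (not in the original file): a worked example of the default
 * (slack == -1) path above. With jiffies = 10000 and expires = 11000 the
 * delta is 1000, so expires_limit = 11000 + 1000/256 = 11003. Then
 * mask = 11000 ^ 11003 = 3, the highest differing bit is bit 1, and the
 * returned value is 11003 & ~1 = 11002, i.e. the timer may be placed up to
 * ~0.2% late in exchange for coarser, more coalescable low-order bits.
 */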
 847
 848/**
 849 * mod_timer - modify a timer's timeout
 850 * @timer: the timer to be modified
 851 * @expires: new timeout in jiffies
 852 *
 853 * mod_timer() is a more efficient way to update the expire field of an
 854 * active timer (if the timer is inactive it will be activated)
 855 *
 856 * mod_timer(timer, expires) is equivalent to:
 857 *
 858 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 859 *
 860 * Note that if there are multiple unserialized concurrent users of the
 861 * same timer, then mod_timer() is the only safe way to modify the timeout,
 862 * since add_timer() cannot modify an already running timer.
 863 *
 864 * The function returns whether it has modified a pending timer or not.
 865 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 866 * active timer returns 1.)
 867 */
 868int mod_timer(struct timer_list *timer, unsigned long expires)
 869{
 870        expires = apply_slack(timer, expires);
 871
 872        /*
 873         * This is a common optimization triggered by the
 874         * networking code - if the timer is re-modified
 875         * to be the same thing then just return:
 876         */
 877        if (timer_pending(timer) && timer->expires == expires)
 878                return 1;
 879
 880        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 881}
 882EXPORT_SYMBOL(mod_timer);
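/*
 * Editor's usage sketch (not part of the original file, not compiled): the
 * classic "push an inactivity timeout into the future on every event"
 * pattern; mod_timer() activates the timer if it was not already pending.
 * "my_idle_timer" is hypothetical.
 */
#if 0
	/* (re)arm a 2 second inactivity timeout */
	mod_timer(&my_idle_timer, jiffies + 2 * HZ);
#endif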
 883
 884/**
 885 * mod_timer_pinned - modify a timer's timeout
 886 * @timer: the timer to be modified
 887 * @expires: new timeout in jiffies
 888 *
 889 * mod_timer_pinned() is a way to update the expire field of an
 890 * active timer (if the timer is inactive it will be activated)
 891 * and to ensure that the timer is scheduled on the current CPU.
 892 *
 893 * Note that this does not prevent the timer from being migrated
 894 * when the current CPU goes offline.  If this is a problem for
 895 * you, use CPU-hotplug notifiers to handle it correctly, for
 896 * example, cancelling the timer when the corresponding CPU goes
 897 * offline.
 898 *
 899 * mod_timer_pinned(timer, expires) is equivalent to:
 900 *
 901 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 902 */
 903int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
 904{
 905        if (timer->expires == expires && timer_pending(timer))
 906                return 1;
 907
 908        return __mod_timer(timer, expires, false, TIMER_PINNED);
 909}
 910EXPORT_SYMBOL(mod_timer_pinned);
 911
 912/**
 913 * add_timer - start a timer
 914 * @timer: the timer to be added
 915 *
 916 * The kernel will do a ->function(->data) callback from the
 917 * timer interrupt at the ->expires point in the future. The
 918 * current time is 'jiffies'.
 919 *
 920 * The timer's ->expires, ->function (and if the handler uses it, ->data)
  921 * fields must be set prior to calling this function.
 922 *
 923 * Timers with an ->expires field in the past will be executed in the next
 924 * timer tick.
 925 */
 926void add_timer(struct timer_list *timer)
 927{
 928        BUG_ON(timer_pending(timer));
 929        mod_timer(timer, timer->expires);
 930}
 931EXPORT_SYMBOL(add_timer);
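/*
 * Editor's usage sketch (not part of the original file, not compiled):
 * add_timer() requires ->expires (and ->function/->data) to be set up
 * front and the timer to not already be pending. "my_timer" and
 * "my_timeout_fn" are hypothetical.
 */
#if 0
	my_timer.function = my_timeout_fn;
	my_timer.data     = 0;
	my_timer.expires  = jiffies + HZ;
	add_timer(&my_timer);	/* same as mod_timer(&my_timer, my_timer.expires) */
#endif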
 932
 933/**
 934 * add_timer_on - start a timer on a particular CPU
 935 * @timer: the timer to be added
 936 * @cpu: the CPU to start it on
 937 *
 938 * This is not very scalable on SMP. Double adds are not possible.
 939 */
 940void add_timer_on(struct timer_list *timer, int cpu)
 941{
 942        struct tvec_base *base = per_cpu(tvec_bases, cpu);
 943        unsigned long flags;
 944
 945        timer_stats_timer_set_start_info(timer);
 946        BUG_ON(timer_pending(timer) || !timer->function);
 947        spin_lock_irqsave(&base->lock, flags);
 948        timer_set_base(timer, base);
 949        debug_activate(timer, timer->expires);
 950        internal_add_timer(base, timer);
 951        /*
 952         * Check whether the other CPU is idle and needs to be
 953         * triggered to reevaluate the timer wheel when nohz is
 954         * active. We are protected against the other CPU fiddling
 955         * with the timer by holding the timer base lock. This also
  956         * makes sure that a CPU on the way to idle cannot evaluate
 957         * the timer wheel.
 958         */
 959        wake_up_idle_cpu(cpu);
 960        spin_unlock_irqrestore(&base->lock, flags);
 961}
 962EXPORT_SYMBOL_GPL(add_timer_on);
 963
 964/**
  965 * del_timer - deactivate a timer.
 966 * @timer: the timer to be deactivated
 967 *
 968 * del_timer() deactivates a timer - this works on both active and inactive
 969 * timers.
 970 *
 971 * The function returns whether it has deactivated a pending timer or not.
 972 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 973 * active timer returns 1.)
 974 */
 975int del_timer(struct timer_list *timer)
 976{
 977        struct tvec_base *base;
 978        unsigned long flags;
 979        int ret = 0;
 980
 981        debug_assert_init(timer);
 982
 983        timer_stats_timer_clear_start_info(timer);
 984        if (timer_pending(timer)) {
 985                base = lock_timer_base(timer, &flags);
 986                ret = detach_if_pending(timer, base, true);
 987                spin_unlock_irqrestore(&base->lock, flags);
 988        }
 989
 990        return ret;
 991}
 992EXPORT_SYMBOL(del_timer);
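/*
 * Editor's usage sketch (not part of the original file, not compiled):
 * del_timer() only dequeues the timer; the handler may still be running on
 * another CPU, so it is not enough on its own before freeing data the
 * handler touches (see del_timer_sync() below). "my_dev" is hypothetical.
 */
#if 0
	if (del_timer(&my_dev->poll_timer))
		pr_debug("cancelled a pending poll\n");
	/* NB: the handler may still be running on another CPU */
#endif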
 993
 994/**
 995 * try_to_del_timer_sync - Try to deactivate a timer
  996 * @timer: timer to deactivate
 997 *
 998 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 999 * exit the timer is not queued and the handler is not running on any CPU.
1000 */
1001int try_to_del_timer_sync(struct timer_list *timer)
1002{
1003        struct tvec_base *base;
1004        unsigned long flags;
1005        int ret = -1;
1006
1007        debug_assert_init(timer);
1008
1009        base = lock_timer_base(timer, &flags);
1010
1011        if (base->running_timer != timer) {
1012                timer_stats_timer_clear_start_info(timer);
1013                ret = detach_if_pending(timer, base, true);
1014        }
1015        spin_unlock_irqrestore(&base->lock, flags);
1016
1017        return ret;
1018}
1019EXPORT_SYMBOL(try_to_del_timer_sync);
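/*
 * Editor's usage sketch (not part of the original file, not compiled): a
 * caller that must not spin (e.g. it shares a lock with the handler) can
 * try once and defer the retry. "my_retry_work" is hypothetical.
 */
#if 0
	if (try_to_del_timer_sync(&my_timer) < 0)
		/* handler is running on another CPU, retry from process context */
		queue_work(system_wq, &my_retry_work);
#endif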
1020
1021#ifdef CONFIG_SMP
1022/**
1023 * del_timer_sync - deactivate a timer and wait for the handler to finish.
1024 * @timer: the timer to be deactivated
1025 *
1026 * This function only differs from del_timer() on SMP: besides deactivating
1027 * the timer it also makes sure the handler has finished executing on other
1028 * CPUs.
1029 *
1030 * Synchronization rules: Callers must prevent restarting of the timer,
1031 * otherwise this function is meaningless. It must not be called from
1032 * interrupt contexts. The caller must not hold locks which would prevent
1033 * completion of the timer's handler. The timer's handler must not call
1034 * add_timer_on(). Upon exit the timer is not queued and the handler is
1035 * not running on any CPU.
1036 *
1037 * Note: You must not hold locks that are held in interrupt context
1038 *   while calling this function. Even if the lock has nothing to do
1039 *   with the timer in question.  Here's why:
1040 *
1041 *    CPU0                             CPU1
1042 *    ----                             ----
1043 *                                   <SOFTIRQ>
1044 *                                   call_timer_fn();
1045 *                                     base->running_timer = mytimer;
1046 *  spin_lock_irq(somelock);
1047 *                                     <IRQ>
1048 *                                        spin_lock(somelock);
1049 *  del_timer_sync(mytimer);
1050 *   while (base->running_timer == mytimer);
1051 *
1052 * Now del_timer_sync() will never return and never release somelock.
1053 * The interrupt on the other CPU is waiting to grab somelock but
1054 * it has interrupted the softirq that CPU0 is waiting to finish.
1055 *
1056 * The function returns whether it has deactivated a pending timer or not.
1057 */
1058int del_timer_sync(struct timer_list *timer)
1059{
1060#ifdef CONFIG_LOCKDEP
1061        unsigned long flags;
1062
1063        /*
1064         * If lockdep gives a backtrace here, please reference
1065         * the synchronization rules above.
1066         */
1067        local_irq_save(flags);
1068        lock_map_acquire(&timer->lockdep_map);
1069        lock_map_release(&timer->lockdep_map);
1070        local_irq_restore(flags);
1071#endif
1072        /*
1073         * don't use it in hardirq context, because it
1074         * could lead to deadlock.
1075         */
1076        WARN_ON(in_irq());
1077        for (;;) {
1078                int ret = try_to_del_timer_sync(timer);
1079                if (ret >= 0)
1080                        return ret;
1081                cpu_relax();
1082        }
1083}
1084EXPORT_SYMBOL(del_timer_sync);
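/*
 * Editor's teardown sketch (not part of the original file, not compiled):
 * the usual safe ordering. Stop the handler from re-arming first, then wait
 * for any running handler, and only then free. "struct my_dev" and its
 * fields are hypothetical.
 */
#if 0
static void my_dev_teardown(struct my_dev *dev)
{
	dev->shutting_down = true;	/* handler checks this before re-arming */
	del_timer_sync(&dev->timer);	/* waits for a running handler to finish */
	kfree(dev);			/* now nothing can reference dev */
}
#endif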
1085#endif
1086
1087static int cascade(struct tvec_base *base, struct tvec *tv, int index)
1088{
1089        /* cascade all the timers from tv up one level */
1090        struct timer_list *timer, *tmp;
1091        struct list_head tv_list;
1092
1093        list_replace_init(tv->vec + index, &tv_list);
1094
1095        /*
1096         * We are removing _all_ timers from the list, so we
1097         * don't have to detach them individually.
1098         */
1099        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
1100                BUG_ON(tbase_get_base(timer->base) != base);
 1101                /* No accounting while moving them */
1102                __internal_add_timer(base, timer);
1103        }
1104
1105        return index;
1106}
1107
1108static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
1109                          unsigned long data)
1110{
1111        int preempt_count = preempt_count();
1112
1113#ifdef CONFIG_LOCKDEP
1114        /*
 1115         * It is permissible to free the timer from inside the
 1116         * function that is called from it; we need to take that into
 1117         * account for lockdep too. To avoid bogus "held lock freed"
1118         * warnings as well as problems when looking into
1119         * timer->lockdep_map, make a copy and use that here.
1120         */
1121        struct lockdep_map lockdep_map;
1122
1123        lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
1124#endif
1125        /*
1126         * Couple the lock chain with the lock chain at
1127         * del_timer_sync() by acquiring the lock_map around the fn()
1128         * call here and in del_timer_sync().
1129         */
1130        lock_map_acquire(&lockdep_map);
1131
1132        trace_timer_expire_entry(timer);
1133        fn(data);
1134        trace_timer_expire_exit(timer);
1135
1136        lock_map_release(&lockdep_map);
1137
1138        if (preempt_count != preempt_count()) {
1139                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
1140                          fn, preempt_count, preempt_count());
1141                /*
1142                 * Restore the preempt count. That gives us a decent
1143                 * chance to survive and extract information. If the
1144                 * callback kept a lock held, bad luck, but not worse
1145                 * than the BUG() we had.
1146                 */
1147                preempt_count() = preempt_count;
1148        }
1149}
1150
1151#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
1152
1153/**
1154 * __run_timers - run all expired timers (if any) on this CPU.
1155 * @base: the timer vector to be processed.
1156 *
1157 * This function cascades all vectors and executes all expired timer
1158 * vectors.
1159 */
1160static inline void __run_timers(struct tvec_base *base)
1161{
1162        struct timer_list *timer;
1163
1164        spin_lock_irq(&base->lock);
1165        while (time_after_eq(jiffies, base->timer_jiffies)) {
1166                struct list_head work_list;
1167                struct list_head *head = &work_list;
1168                int index = base->timer_jiffies & TVR_MASK;
1169
1170                /*
1171                 * Cascade timers:
1172                 */
1173                if (!index &&
1174                        (!cascade(base, &base->tv2, INDEX(0))) &&
1175                                (!cascade(base, &base->tv3, INDEX(1))) &&
1176                                        !cascade(base, &base->tv4, INDEX(2)))
1177                        cascade(base, &base->tv5, INDEX(3));
1178                ++base->timer_jiffies;
1179                list_replace_init(base->tv1.vec + index, &work_list);
1180                while (!list_empty(head)) {
1181                        void (*fn)(unsigned long);
1182                        unsigned long data;
1183
 1184                        timer = list_first_entry(head, struct timer_list, entry);
1185                        fn = timer->function;
1186                        data = timer->data;
1187
1188                        timer_stats_account_timer(timer);
1189
1190                        base->running_timer = timer;
1191                        detach_expired_timer(timer, base);
1192
1193                        spin_unlock_irq(&base->lock);
1194                        call_timer_fn(timer, fn, data);
1195                        spin_lock_irq(&base->lock);
1196                }
1197        }
1198        base->running_timer = NULL;
1199        spin_unlock_irq(&base->lock);
1200}
1201
1202#ifdef CONFIG_NO_HZ
1203/*
1204 * Find out when the next timer event is due to happen. This
1205 * is used on S/390 to stop all activity when a CPU is idle.
1206 * This function needs to be called with interrupts disabled.
1207 */
1208static unsigned long __next_timer_interrupt(struct tvec_base *base)
1209{
1210        unsigned long timer_jiffies = base->timer_jiffies;
1211        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
1212        int index, slot, array, found = 0;
1213        struct timer_list *nte;
1214        struct tvec *varray[4];
1215
1216        /* Look for timer events in tv1. */
1217        index = slot = timer_jiffies & TVR_MASK;
1218        do {
1219                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
1220                        if (tbase_get_deferrable(nte->base))
1221                                continue;
1222
1223                        found = 1;
1224                        expires = nte->expires;
1225                        /* Look at the cascade bucket(s)? */
1226                        if (!index || slot < index)
1227                                goto cascade;
1228                        return expires;
1229                }
1230                slot = (slot + 1) & TVR_MASK;
1231        } while (slot != index);
1232
1233cascade:
1234        /* Calculate the next cascade event */
1235        if (index)
1236                timer_jiffies += TVR_SIZE - index;
1237        timer_jiffies >>= TVR_BITS;
1238
1239        /* Check tv2-tv5. */
1240        varray[0] = &base->tv2;
1241        varray[1] = &base->tv3;
1242        varray[2] = &base->tv4;
1243        varray[3] = &base->tv5;
1244
1245        for (array = 0; array < 4; array++) {
1246                struct tvec *varp = varray[array];
1247
1248                index = slot = timer_jiffies & TVN_MASK;
1249                do {
1250                        list_for_each_entry(nte, varp->vec + slot, entry) {
1251                                if (tbase_get_deferrable(nte->base))
1252                                        continue;
1253
1254                                found = 1;
1255                                if (time_before(nte->expires, expires))
1256                                        expires = nte->expires;
1257                        }
1258                        /*
 1259                         * Are we still searching for the first timer, or are
 1260                         * we looking up the cascade buckets?
1261                         */
1262                        if (found) {
1263                                /* Look at the cascade bucket(s)? */
1264                                if (!index || slot < index)
1265                                        break;
1266                                return expires;
1267                        }
1268                        slot = (slot + 1) & TVN_MASK;
1269                } while (slot != index);
1270
1271                if (index)
1272                        timer_jiffies += TVN_SIZE - index;
1273                timer_jiffies >>= TVN_BITS;
1274        }
1275        return expires;
1276}
1277
1278/*
 1279 * Check whether the next hrtimer event is before the next timer wheel
1280 * event:
1281 */
1282static unsigned long cmp_next_hrtimer_event(unsigned long now,
1283                                            unsigned long expires)
1284{
1285        ktime_t hr_delta = hrtimer_get_next_event();
1286        struct timespec tsdelta;
1287        unsigned long delta;
1288
1289        if (hr_delta.tv64 == KTIME_MAX)
1290                return expires;
1291
1292        /*
1293         * Expired timer available, let it expire in the next tick
1294         */
1295        if (hr_delta.tv64 <= 0)
1296                return now + 1;
1297
1298        tsdelta = ktime_to_timespec(hr_delta);
1299        delta = timespec_to_jiffies(&tsdelta);
1300
1301        /*
1302         * Limit the delta to the max value, which is checked in
1303         * tick_nohz_stop_sched_tick():
1304         */
1305        if (delta > NEXT_TIMER_MAX_DELTA)
1306                delta = NEXT_TIMER_MAX_DELTA;
1307
1308        /*
 1309         * Take rounding errors into account and make sure that it
 1310         * expires no earlier than the next tick. Otherwise we go into an
 1311         * endless ping-pong due to tick_nohz_stop_sched_tick() retriggering
 1312         * the timer softirq.
1313         */
1314        if (delta < 1)
1315                delta = 1;
1316        now += delta;
1317        if (time_before(now, expires))
1318                return now;
1319        return expires;
1320}
1321
1322/**
1323 * get_next_timer_interrupt - return the jiffy of the next pending timer
1324 * @now: current time (in jiffies)
1325 */
1326unsigned long get_next_timer_interrupt(unsigned long now)
1327{
1328        struct tvec_base *base = __this_cpu_read(tvec_bases);
1329        unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
1330
1331        /*
1332         * Pretend that there is no timer pending if the cpu is offline.
1333         * Possible pending timers will be migrated later to an active cpu.
1334         */
1335        if (cpu_is_offline(smp_processor_id()))
1336                return expires;
1337
1338        spin_lock(&base->lock);
1339        if (base->active_timers) {
1340                if (time_before_eq(base->next_timer, base->timer_jiffies))
1341                        base->next_timer = __next_timer_interrupt(base);
1342                expires = base->next_timer;
1343        }
1344        spin_unlock(&base->lock);
1345
1346        if (time_before_eq(expires, now))
1347                return now;
1348
1349        return cmp_next_hrtimer_event(now, expires);
1350}
1351#endif
1352
1353/*
1354 * Called from the timer interrupt handler to charge one tick to the current
1355 * process.  user_tick is 1 if the tick is user time, 0 for system.
1356 */
1357void update_process_times(int user_tick)
1358{
1359        struct task_struct *p = current;
1360        int cpu = smp_processor_id();
1361
1362        /* Note: this timer irq context must be accounted for as well. */
1363        account_process_tick(p, user_tick);
1364        run_local_timers();
1365        rcu_check_callbacks(cpu, user_tick);
1366        printk_tick();
1367#ifdef CONFIG_IRQ_WORK
1368        if (in_irq())
1369                irq_work_run();
1370#endif
1371        scheduler_tick();
1372        run_posix_cpu_timers(p);
1373}
1374
1375/*
1376 * This function runs timers and the timer-tq in bottom half context.
1377 */
1378static void run_timer_softirq(struct softirq_action *h)
1379{
1380        struct tvec_base *base = __this_cpu_read(tvec_bases);
1381
1382        hrtimer_run_pending();
1383
1384        if (time_after_eq(jiffies, base->timer_jiffies))
1385                __run_timers(base);
1386}
1387
1388/*
1389 * Called by the local, per-CPU timer interrupt on SMP.
1390 */
1391void run_local_timers(void)
1392{
1393        hrtimer_run_queues();
1394        raise_softirq(TIMER_SOFTIRQ);
1395}
1396
1397#ifdef __ARCH_WANT_SYS_ALARM
1398
1399/*
1400 * For backwards compatibility?  This can be done in libc so Alpha
1401 * and all newer ports shouldn't need it.
1402 */
1403SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1404{
1405        return alarm_setitimer(seconds);
1406}
1407
1408#endif
1409
1410/**
1411 * sys_getpid - return the thread group id of the current process
1412 *
1413 * Note, despite the name, this returns the tgid not the pid.  The tgid and
1414 * the pid are identical unless CLONE_THREAD was specified on clone() in
1415 * which case the tgid is the same in all threads of the same group.
1416 *
1417 * This is SMP safe as current->tgid does not change.
1418 */
1419SYSCALL_DEFINE0(getpid)
1420{
1421        return task_tgid_vnr(current);
1422}
1423
1424/*
1425 * Accessing ->real_parent is not SMP-safe, it could
1426 * change from under us. However, we can use a stale
1427 * value of ->real_parent under rcu_read_lock(), see
1428 * release_task()->call_rcu(delayed_put_task_struct).
1429 */
1430SYSCALL_DEFINE0(getppid)
1431{
1432        int pid;
1433
1434        rcu_read_lock();
1435        pid = task_tgid_vnr(rcu_dereference(current->real_parent));
1436        rcu_read_unlock();
1437
1438        return pid;
1439}
1440
1441SYSCALL_DEFINE0(getuid)
1442{
1443        /* Only we change this so SMP safe */
1444        return from_kuid_munged(current_user_ns(), current_uid());
1445}
1446
1447SYSCALL_DEFINE0(geteuid)
1448{
1449        /* Only we change this so SMP safe */
1450        return from_kuid_munged(current_user_ns(), current_euid());
1451}
1452
1453SYSCALL_DEFINE0(getgid)
1454{
1455        /* Only we change this so SMP safe */
1456        return from_kgid_munged(current_user_ns(), current_gid());
1457}
1458
1459SYSCALL_DEFINE0(getegid)
1460{
1461        /* Only we change this so SMP safe */
1462        return from_kgid_munged(current_user_ns(), current_egid());
1463}
1464
1465static void process_timeout(unsigned long __data)
1466{
1467        wake_up_process((struct task_struct *)__data);
1468}
1469
1470/**
1471 * schedule_timeout - sleep until timeout
1472 * @timeout: timeout value in jiffies
1473 *
1474 * Make the current task sleep until @timeout jiffies have
1475 * elapsed. The routine will return immediately unless
1476 * the current task state has been set (see set_current_state()).
1477 *
1478 * You can set the task state as follows -
1479 *
1480 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 1481 * pass before the routine returns. The routine will return 0.
1482 *
1483 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1484 * delivered to the current task. In this case the remaining time
 1485 * in jiffies will be returned, or 0 if the timer expired in time.
1486 *
1487 * The current task state is guaranteed to be TASK_RUNNING when this
1488 * routine returns.
1489 *
1490 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1491 * the CPU away without a bound on the timeout. In this case the return
1492 * value will be %MAX_SCHEDULE_TIMEOUT.
1493 *
1494 * In all cases the return value is guaranteed to be non-negative.
1495 */
1496signed long __sched schedule_timeout(signed long timeout)
1497{
1498        struct timer_list timer;
1499        unsigned long expire;
1500
1501        switch (timeout)
1502        {
1503        case MAX_SCHEDULE_TIMEOUT:
1504                /*
 1505                 * These two special cases are useful to make the caller
 1506                 * comfortable. Nothing more. We could take
 1507                 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
 1508                 * but I'd like to return a valid offset (>=0) to allow
 1509                 * the caller to do everything it wants with the retval.
1510                 */
1511                schedule();
1512                goto out;
1513        default:
1514                /*
 1515                 * Another bit of paranoia. Note that the retval will be
 1516                 * 0 since no piece of the kernel is supposed to check
 1517                 * for a negative retval of schedule_timeout() (since it
 1518                 * should never happen anyway). You just have the printk()
 1519                 * that will tell you if something has gone wrong and where.
1520                 */
1521                if (timeout < 0) {
1522                        printk(KERN_ERR "schedule_timeout: wrong timeout "
1523                                "value %lx\n", timeout);
1524                        dump_stack();
1525                        current->state = TASK_RUNNING;
1526                        goto out;
1527                }
1528        }
1529
1530        expire = timeout + jiffies;
1531
1532        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1533        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
1534        schedule();
1535        del_singleshot_timer_sync(&timer);
1536
1537        /* Remove the timer from the object tracker */
1538        destroy_timer_on_stack(&timer);
1539
1540        timeout = expire - jiffies;
1541
1542 out:
1543        return timeout < 0 ? 0 : timeout;
1544}
1545EXPORT_SYMBOL(schedule_timeout);
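/*
 * Editor's usage sketch (not part of the original file, not compiled):
 * sleep for up to 100 ms, waking early on a signal. Remember to set the
 * task state first, otherwise schedule_timeout() returns immediately.
 */
#if 0
	signed long remaining;

	set_current_state(TASK_INTERRUPTIBLE);
	remaining = schedule_timeout(msecs_to_jiffies(100));
	if (remaining)
		pr_debug("woken early, %ld jiffies left\n", remaining);
#endif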
1546
1547/*
1548 * We can use __set_current_state() here because schedule_timeout() calls
1549 * schedule() unconditionally.
1550 */
1551signed long __sched schedule_timeout_interruptible(signed long timeout)
1552{
1553        __set_current_state(TASK_INTERRUPTIBLE);
1554        return schedule_timeout(timeout);
1555}
1556EXPORT_SYMBOL(schedule_timeout_interruptible);
1557
1558signed long __sched schedule_timeout_killable(signed long timeout)
1559{
1560        __set_current_state(TASK_KILLABLE);
1561        return schedule_timeout(timeout);
1562}
1563EXPORT_SYMBOL(schedule_timeout_killable);
1564
1565signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1566{
1567        __set_current_state(TASK_UNINTERRUPTIBLE);
1568        return schedule_timeout(timeout);
1569}
1570EXPORT_SYMBOL(schedule_timeout_uninterruptible);
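
/*
 * A short sketch of how the wrappers above are typically used; the wrapper
 * sets the task state itself, so the caller only checks the return value.
 * delay_or_signal_example() and the one-second timeout are illustrative
 * assumptions.
 */
static int __maybe_unused delay_or_signal_example(void)
{
        /* Sleep for roughly a second, but let fatal signals end the wait. */
        if (schedule_timeout_killable(HZ))
                return -EINTR;          /* woken early: time was left over */
        return 0;                       /* the full second elapsed */
}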
1571
1572/* Thread ID - the internal kernel "pid" */
1573SYSCALL_DEFINE0(gettid)
1574{
1575        return task_pid_vnr(current);
1576}
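
/*
 * For reference, a minimal userspace sketch of reaching this syscall; older
 * C libraries do not wrap gettid(), so the raw syscall() interface is shown.
 * The program below is illustrative only.
 *
 *      #include <stdio.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *
 *      int main(void)
 *      {
 *              printf("tid=%ld pid=%ld\n",
 *                     (long)syscall(SYS_gettid), (long)getpid());
 *              return 0;
 *      }
 */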
1577
1578/**
1579 * do_sysinfo - fill in sysinfo struct
1580 * @info: pointer to buffer to fill
1581 */
1582int do_sysinfo(struct sysinfo *info)
1583{
1584        unsigned long mem_total, sav_total;
1585        unsigned int mem_unit, bitcount;
1586        struct timespec tp;
1587
1588        memset(info, 0, sizeof(struct sysinfo));
1589
1590        ktime_get_ts(&tp);
1591        monotonic_to_bootbased(&tp);
1592        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1593
1594        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
1595
1596        info->procs = nr_threads;
1597
1598        si_meminfo(info);
1599        si_swapinfo(info);
1600
1601        /*
1602         * If the sum of all the available memory (i.e. ram + swap)
1603         * is less than can be stored in a 32 bit unsigned long then
1604         * we can be binary compatible with 2.2.x kernels.  If not,
1605         * well, in that case 2.2.x was broken anyways...
1606         *
1607         *  -Erik Andersen <andersee@debian.org>
1608         */
1609
1610        mem_total = info->totalram + info->totalswap;
1611        if (mem_total < info->totalram || mem_total < info->totalswap)
1612                goto out;
1613        bitcount = 0;
1614        mem_unit = info->mem_unit;
1615        while (mem_unit > 1) {
1616                bitcount++;
1617                mem_unit >>= 1;
1618                sav_total = mem_total;
1619                mem_total <<= 1;
1620                if (mem_total < sav_total)
1621                        goto out;
1622        }
1623
1624        /*
1625         * If mem_total did not overflow, multiply all memory values by
1626         * info->mem_unit and set it to 1.  This leaves things compatible
1627         * with 2.2.x, and also retains compatibility with earlier 2.4.x
1628         * kernels...
1629         */
1630
1631        info->mem_unit = 1;
1632        info->totalram <<= bitcount;
1633        info->freeram <<= bitcount;
1634        info->sharedram <<= bitcount;
1635        info->bufferram <<= bitcount;
1636        info->totalswap <<= bitcount;
1637        info->freeswap <<= bitcount;
1638        info->totalhigh <<= bitcount;
1639        info->freehigh <<= bitcount;
1640
1641out:
1642        return 0;
1643}
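
/*
 * A worked example of the scaling above, with assumed input values: if
 * si_meminfo() reported totalram == 262144 units and mem_unit == 4096
 * (4 KiB pages), the loop runs twelve times without overflowing, so
 * bitcount == 12; every memory field is then shifted left by 12 and
 * mem_unit becomes 1, i.e. totalram == 1073741824 bytes (1 GiB).  If
 * doubling mem_total would ever wrap the unsigned long, the code bails
 * out early and leaves mem_unit untouched.
 */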
1644
1645SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1646{
1647        struct sysinfo val;
1648
1649        do_sysinfo(&val);
1650
1651        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1652                return -EFAULT;
1653
1654        return 0;
1655}
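
/*
 * For reference, a minimal userspace sketch of this syscall through the
 * glibc wrapper; multiplying the memory fields by mem_unit always yields
 * bytes, however the kernel chose to scale them.  The program below is
 * illustrative only.
 *
 *      #include <stdio.h>
 *      #include <sys/sysinfo.h>
 *
 *      int main(void)
 *      {
 *              struct sysinfo si;
 *
 *              if (sysinfo(&si) != 0)
 *                      return 1;
 *              printf("up %ld s, ram %llu bytes, %hu tasks\n",
 *                     si.uptime,
 *                     (unsigned long long)si.totalram * si.mem_unit,
 *                     si.procs);
 *              return 0;
 *      }
 */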
1656
1657static int __cpuinit init_timers_cpu(int cpu)
1658{
1659        int j;
1660        struct tvec_base *base;
1661        static char __cpuinitdata tvec_base_done[NR_CPUS];
1662
1663        if (!tvec_base_done[cpu]) {
1664                static char boot_done;
1665
1666                if (boot_done) {
1667                        /*
1668                         * The APs use this path later in boot
1669                         */
1670                        base = kmalloc_node(sizeof(*base),
1671                                                GFP_KERNEL | __GFP_ZERO,
1672                                                cpu_to_node(cpu));
1673                        if (!base)
1674                                return -ENOMEM;
1675
1676                        /* Make sure tvec_base is 2 byte aligned: the low bit carries the deferrable flag */
1677                        if (tbase_get_deferrable(base)) {
1678                                WARN_ON(1);
1679                                kfree(base);
1680                                return -ENOMEM;
1681                        }
1682                        per_cpu(tvec_bases, cpu) = base;
1683                } else {
1684                        /*
1685                         * This is for the boot CPU - we use compile-time
1686                         * static initialisation because per-cpu memory isn't
1687                         * ready yet and because the memory allocators are not
1688                         * initialised either.
1689                         */
1690                        boot_done = 1;
1691                        base = &boot_tvec_bases;
1692                }
1693                tvec_base_done[cpu] = 1;
1694        } else {
1695                base = per_cpu(tvec_bases, cpu);
1696        }
1697
1698        spin_lock_init(&base->lock);
1699
1700        for (j = 0; j < TVN_SIZE; j++) {
1701                INIT_LIST_HEAD(base->tv5.vec + j);
1702                INIT_LIST_HEAD(base->tv4.vec + j);
1703                INIT_LIST_HEAD(base->tv3.vec + j);
1704                INIT_LIST_HEAD(base->tv2.vec + j);
1705        }
1706        for (j = 0; j < TVR_SIZE; j++)
1707                INIT_LIST_HEAD(base->tv1.vec + j);
1708
1709        base->timer_jiffies = jiffies;
1710        base->next_timer = base->timer_jiffies;
1711        base->active_timers = 0;
1712        return 0;
1713}
1714
1715#ifdef CONFIG_HOTPLUG_CPU
1716static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
1717{
1718        struct timer_list *timer;
1719
1720        while (!list_empty(head)) {
1721                timer = list_first_entry(head, struct timer_list, entry);
1722                /* We ignore the accounting on the dying cpu */
1723                detach_timer(timer, false);
1724                timer_set_base(timer, new_base);
1725                internal_add_timer(new_base, timer);
1726        }
1727}
1728
1729static void __cpuinit migrate_timers(int cpu)
1730{
1731        struct tvec_base *old_base;
1732        struct tvec_base *new_base;
1733        int i;
1734
1735        BUG_ON(cpu_online(cpu));
1736        old_base = per_cpu(tvec_bases, cpu);
1737        new_base = get_cpu_var(tvec_bases);
1738        /*
1739         * The caller is globally serialized and nobody else takes
1740         * two locks at once, so deadlock is not possible.
1741         */
1742        spin_lock_irq(&new_base->lock);
1743        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1744
1745        BUG_ON(old_base->running_timer);
1746
1747        for (i = 0; i < TVR_SIZE; i++)
1748                migrate_timer_list(new_base, old_base->tv1.vec + i);
1749        for (i = 0; i < TVN_SIZE; i++) {
1750                migrate_timer_list(new_base, old_base->tv2.vec + i);
1751                migrate_timer_list(new_base, old_base->tv3.vec + i);
1752                migrate_timer_list(new_base, old_base->tv4.vec + i);
1753                migrate_timer_list(new_base, old_base->tv5.vec + i);
1754        }
1755
1756        spin_unlock(&old_base->lock);
1757        spin_unlock_irq(&new_base->lock);
1758        put_cpu_var(tvec_bases);
1759}
1760#endif /* CONFIG_HOTPLUG_CPU */
1761
1762static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1763                                unsigned long action, void *hcpu)
1764{
1765        long cpu = (long)hcpu;
1766        int err;
1767
1768        switch(action) {
1769        case CPU_UP_PREPARE:
1770        case CPU_UP_PREPARE_FROZEN:
1771                err = init_timers_cpu(cpu);
1772                if (err < 0)
1773                        return notifier_from_errno(err);
1774                break;
1775#ifdef CONFIG_HOTPLUG_CPU
1776        case CPU_DEAD:
1777        case CPU_DEAD_FROZEN:
1778                migrate_timers(cpu);
1779                break;
1780#endif
1781        default:
1782                break;
1783        }
1784        return NOTIFY_OK;
1785}
1786
1787static struct notifier_block __cpuinitdata timers_nb = {
1788        .notifier_call  = timer_cpu_notify,
1789};
1790
1791
1792void __init init_timers(void)
1793{
1794        int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1795                                (void *)(long)smp_processor_id());
1796
1797        init_timer_stats();
1798
1799        BUG_ON(err != NOTIFY_OK);
1800        register_cpu_notifier(&timers_nb);
1801        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
1802}
1803
1804/**
1805 * msleep - sleep safely even with waitqueue interruptions
1806 * @msecs: Time in milliseconds to sleep for
1807 */
1808void msleep(unsigned int msecs)
1809{
1810        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1811
1812        while (timeout)
1813                timeout = schedule_timeout_uninterruptible(timeout);
1814}
1815
1816EXPORT_SYMBOL(msleep);
1817
1818/**
1819 * msleep_interruptible - sleep waiting for signals
1820 * @msecs: Time in milliseconds to sleep for
1821 */
1822unsigned long msleep_interruptible(unsigned int msecs)
1823{
1824        unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1825
1826        while (timeout && !signal_pending(current))
1827                timeout = schedule_timeout_interruptible(timeout);
1828        return jiffies_to_msecs(timeout);
1829}
1830
1831EXPORT_SYMBOL(msleep_interruptible);
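
/*
 * A small sketch of using the return value above to notice a signal;
 * interruptible_pause_example() and the 500 ms delay are illustrative
 * assumptions.
 */
static int __maybe_unused interruptible_pause_example(void)
{
        unsigned long left = msleep_interruptible(500);

        if (left)
                return -ERESTARTSYS;    /* a signal cut the sleep short */
        return 0;                       /* slept for the full 500 ms */
}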
1832
1833static int __sched do_usleep_range(unsigned long min, unsigned long max)
1834{
1835        ktime_t kmin;
1836        unsigned long delta;
1837
1838        kmin = ktime_set(0, min * NSEC_PER_USEC);
1839        delta = (max - min) * NSEC_PER_USEC;
1840        return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
1841}
1842
1843/**
1844 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
1845 * @min: Minimum time in usecs to sleep
1846 * @max: Maximum time in usecs to sleep
1847 */
1848void usleep_range(unsigned long min, unsigned long max)
1849{
1850        __set_current_state(TASK_UNINTERRUPTIBLE);
1851        do_usleep_range(min, max);
1852}
1853EXPORT_SYMBOL(usleep_range);
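
/*
 * A sketch of choosing between the sleep helpers above, following the usual
 * guidance: usleep_range() for short, schedulable waits and msleep() where
 * jiffy resolution is enough.  wait_for_ready_example(), its status register
 * and the chosen delays are illustrative assumptions.
 */
static int __maybe_unused wait_for_ready_example(void __iomem *status_reg)
{
        int retries = 100;

        while (!(readl(status_reg) & 0x1) && retries--)
                usleep_range(100, 200);         /* flexible, hrtimer-backed wait */

        if (retries < 0)
                return -ETIMEDOUT;              /* never became ready */

        msleep(10);                             /* longer settle time */
        return 0;
}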
1854