/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
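/*
 * Example (assuming the usual MAX_RT_PRIO == 100, MAX_PRIO == 140):
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120 and
 * NICE_TO_PRIO(19) == 139, so the whole nice range maps onto
 * [MAX_RT_PRIO..MAX_PRIO-1]; PRIO_TO_NICE() inverts the mapping.
 */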

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
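/*
 * For example, assuming HZ == 1000: NSEC_PER_SEC / HZ == 1000000, so
 * NS_TO_JIFFIES(100000000) == 100, i.e. 100ms worth of nanoseconds
 * becomes 100 jiffies.
 */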

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t		rt_runtime_lock;
	ktime_t			rt_period;
	u64			rt_runtime;
	struct hrtimer		rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

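	/*
	 * hrtimer_forward() advances the timer's expiry by whole
	 * rt_period steps and returns how many periods were skipped;
	 * a return of 0 means the expiry is back in the future, so
	 * there is nothing left to account and we stop.
	 */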
	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	ktime_t now;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		unsigned long delta;
		ktime_t soft, hard;

		if (hrtimer_active(&rt_b->rt_period_timer))
			break;

		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

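		/*
		 * Re-arm at the next period boundary, keeping the timer's
		 * slack: the soft..hard expiry window is preserved so the
		 * period tick remains coalescable with other timers.
		 */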
		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
				HRTIMER_MODE_ABS_PINNED, 0);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;
};

#define root_task_group init_task_group

/* task_group_lock serializes add/remove of task groups and also changes to
 * a task group's cpu shares.
 */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
	return list_empty(&root_task_group.children);
}
#endif

# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large;
 * the same goes for a task group's shares value.
 * (The default weight is 1024 - so there's no practical
 *  limitation from this.)
 */
#define MIN_SHARES	2
#define MAX_SHARES	(1UL << 18)

static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif

/* Default task group.
 *	Every task in the system belongs to this group at bootup.
 */
struct task_group init_task_group;

#endif	/* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running;

	u64 exec_clock;
	u64 min_vruntime;

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e., when none is currently running).
	 */
	struct sched_entity *curr, *next, *last;

	unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rq's in a cpu.
	 * This list is used during load balance.
	 */
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * this cpu's part of tg->shares
	 */
	unsigned long shares;

	/*
	 * load.weight at the time we set shares
	 */
	unsigned long rq_weight;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	atomic_t rto_count;
#ifdef CONFIG_SMP
	struct cpupri cpupri;
#endif
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that needs to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	unsigned int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_at_tick;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	unsigned long avg_load_per_task;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;

	/* BKL stats */
	unsigned int bkl_count;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	rq->curr->sched_class->check_preempt_curr(rq, p, flags);

	/*
	 * A queue event has occurred, and we're going to schedule.  In
	 * this case, we can save a useless back-to-back clock update.
	 */
	if (test_tsk_need_resched(p))
		rq->skip_clock_update = 1;
}

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      rcu_read_lock_sched_held() || \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
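/*
 * Typical usage (see get_nohz_timer_target() below): walk the domain
 * hierarchy from the CPU's base domain up to the root, e.g.
 *
 *	for_each_domain(cpu, sd) {
 *		for_each_cpu(i, sched_domain_span(sd))
 *			...
 *	}
 */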

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&task_rq(p)->lock));
	return container_of(css, struct task_group, css);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

inline void update_rq_clock(struct rq *rq)
{
	if (!rq->skip_clock_update)
		rq->clock = sched_clock_cpu(cpu_of(rq));
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT
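/*
 * The above is the classic X-macro pattern: sched_features.h is a list
 * of SCHED_FEAT(name, enabled) entries, and each #include expands it
 * under a different SCHED_FEAT() definition. A hypothetical
 * SCHED_FEAT(FOO, 1) entry would thus generate the enum constant
 * __SCHED_FEAT_FOO above, contribute (1UL << __SCHED_FEAT_FOO) * 1 to
 * the default feature mask, and (below, under CONFIG_SCHED_DEBUG) the
 * string "FOO" in sched_feat_names[].
 */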

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled)	\
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		int len = strlen(sched_feat_names[i]);

		if (strncmp(cmp, sched_feat_names[i], len) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * ratelimit for updating the group shares.
 * default: 0.25ms
 */
unsigned int sysctl_sched_shares_ratelimit = 250000;
unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;

/*
 * Inject some fuzziness into changing the per-cpu group shares;
 * this avoids remote rq-locks at the expense of fairness.
 * default: 4
 */
unsigned int sysctl_sched_shares_thresh = 4;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
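/*
 * With the defaults above, rt tasks may consume 950000us out of every
 * 1000000us: 95% of each second, leaving at least 5% for SCHED_NORMAL
 * tasks. A negative sysctl_sched_rt_runtime maps to RUNTIME_INF,
 * i.e. no RT throttling at all.
 */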

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
	return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->oncpu;
#else
	return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->oncpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
 * against ttwu().
 */
static inline int task_is_waking(struct task_struct *p)
{
	return unlikely(p->state == TASK_WAKING);
}

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
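		/*
		 * p can migrate to another CPU between the task_rq()
		 * lookup and us taking its rq->lock; if the runqueue
		 * changed underneath us, drop the lock and retry until
		 * the lookup and the locked rq agree.
		 */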
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		local_irq_save(*flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock_irqrestore(&rq->lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
	__releases(rq->lock)
{
	raw_spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while holding
 * the rq->lock. So what we do is store a state in rq->hrtick_* and ask for
 * a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu.  This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd))
			if (!idle_cpu(i))
				return i;
	}
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

#endif /* CONFIG_NO_HZ */

static u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

static void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

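	/*
	 * Each elapsed period halves rq->rt_avg and advances age_stamp,
	 * so rt_avg decays into an exponential average of RT runtime
	 * with a half-life of sched_avg_period() (0.5s by default).
	 */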
	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}

#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
	assert_raw_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}

static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
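/*
 * Unlike a plain >> which truncates, SRR() rounds to nearest by adding
 * half of the divisor first: e.g. SRR(5, 1) == (5 + 1) >> 1 == 3,
 * whereas 5 >> 1 == 2.
 */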

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	if (!lw->inv_weight) {
		if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
			lw->inv_weight = 1;
		else
			lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
				/ (lw->weight+1);
	}

	tmp = (u64)delta_exec * weight;
	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
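/*
 * Worked example: with weight == NICE_0_LOAD (1024) and a queue load of
 * lw->weight == 2048, inv_weight caches ~2^32/2048, so a delta_exec of
 * 1000000ns scales to roughly 1000000 * 1024 / 2048 == 500000ns - the
 * multiply-and-shift replaces a 64-bit division on this hot path.
 */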

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
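
/*
 * Example: two CPU-bound tasks at nice 0 (weight 1024) and nice 5
 * (weight 335) share a CPU in proportion to their weights, i.e.
 * 1024 / (1024 + 335) ~= 75% versus ~25%. Adjacent entries differ by
 * the ~1.25 multiplier described above (e.g. 1024 / 820 ~= 1.25).
 */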

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
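
/*
 * Sanity check: the nice-0 entry is 2^32 / 1024 == 4194304, so
 * "x / 1024" becomes "(x * 4194304) >> 32" in calc_delta_mine().
 */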

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#ifdef CONFIG_CGROUP_CPUACCT
static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val) {}
#endif

static inline void inc_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_add(&rq->load, load);
}

static inline void dec_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_sub(&rq->load, load);
}

#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
typedef int (*tg_visitor)(struct task_group *, void *);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 */
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;
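
	/*
	 * This is an iterative depth-first walk expressed with gotos
	 * instead of recursion: "goto down" descends into the first
	 * unvisited child, and once a node's children list is exhausted
	 * "goto up" resumes the list_for_each_entry_rcu() in the parent
	 * where it left off.
	 */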

	rcu_read_lock();
	parent = &root_task_group;
down:
	ret = (*down)(parent, data);
	if (ret)
		goto out_unlock;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret)
		goto out_unlock;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out_unlock:
	rcu_read_unlock();

	return ret;
}

static int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}

static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	if (nr_running)
		rq->avg_load_per_task = rq->load.weight / nr_running;
	else
		rq->avg_load_per_task = 0;

	return rq->avg_load_per_task;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

static __read_mostly unsigned long __percpu *update_shares_data;

static void __set_se_shares(struct sched_entity *se, unsigned long shares);

/*
 * Calculate and set the cpu's group shares.
 */
static void update_group_shares_cpu(struct task_group *tg, int cpu,
				    unsigned long sd_shares,
				    unsigned long sd_rq_weight,
				    unsigned long *usd_rq_weight)
{
	unsigned long shares, rq_weight;
	int boost = 0;

	rq_weight = usd_rq_weight[cpu];
	if (!rq_weight) {
		boost = 1;
		rq_weight = NICE_0_LOAD;
	}

	/*
	 *             \Sum_j shares_j * rq_weight_i
	 * shares_i =  -----------------------------
	 *                  \Sum_j rq_weight_j
	 */
	shares = (sd_shares * rq_weight) / sd_rq_weight;
	shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);

	if (abs(shares - tg->se[cpu]->load.weight) >
			sysctl_sched_shares_thresh) {
		struct rq *rq = cpu_rq(cpu);
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
		tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
		__set_se_shares(tg->se[cpu], shares);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}
}

/*
 * Re-compute a task group's per-cpu shares over the given domain.
 * This needs to be done in a bottom-up fashion because the rq weight of a
 * parent group depends on the shares of its child groups.
 */
static int tg_shares_up(struct task_group *tg, void *data)
{
	unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
	unsigned long *usd_rq_weight;
	struct sched_domain *sd = data;
	unsigned long flags;
	int i;

	if (!tg->se[0])
		return 0;

	local_irq_save(flags);
	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());

	for_each_cpu(i, sched_domain_span(sd)) {
		weight = tg->cfs_rq[i]->load.weight;
		usd_rq_weight[i] = weight;

		rq_weight += weight;
		/*
		 * If there are currently no tasks on the cpu pretend there
		 * is one of average load so that when a new task gets to
		 * run here it will not get delayed by group starvation.
		 */
		if (!weight)
			weight = NICE_0_LOAD;

		sum_weight += weight;
		shares += tg->cfs_rq[i]->shares;
	}

	if (!rq_weight)
		rq_weight = sum_weight;

	if ((!shares && rq_weight) || shares > tg->shares)
		shares = tg->shares;

	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
		shares = tg->shares;

	for_each_cpu(i, sched_domain_span(sd))
		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);

	local_irq_restore(flags);

	return 0;
}

/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
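/*
 * Example: with a root-level load.weight of 2048 on this cpu, a child
 * group holding shares 1024 of it ends up with
 * h_load ~= 2048 * 1024 / (2048 + 1) ~= 1024, i.e. half of its
 * parent's hierarchical load.
 */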
static int tg_load_down(struct task_group *tg, void *data)
{
	unsigned long load;
	long cpu = (long)data;

	if (!tg->parent) {
		load = cpu_rq(cpu)->load.weight;
	} else {
		load = tg->parent->cfs_rq[cpu]->h_load;
		load *= tg->cfs_rq[cpu]->shares;
		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
	}

	tg->cfs_rq[cpu]->h_load = load;

	return 0;
}

static void update_shares(struct sched_domain *sd)
{
	s64 elapsed;
	u64 now;

	if (root_task_group_empty())
		return;

	now = local_clock();
	elapsed = now - sd->last_update;

	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
		sd->last_update = now;
		walk_tg_tree(tg_nop, tg_shares_up, sd);
	}
}

static void update_h_load(long cpu)
{
	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}

#else

static inline void update_shares(struct sched_domain *sd)
{
}

#endif

#ifdef CONFIG_PREEMPT

static void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
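	/*
	 * Always take the lower-addressed lock first. With a global
	 * ordering, two CPUs locking the same pair can never each hold
	 * one lock while spinning on the other (ABBA deadlock).
	 */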
1780        if (rq1 == rq2) {
1781                raw_spin_lock(&rq1->lock);
1782                __acquire(rq2->lock);   /* Fake it out ;) */
1783        } else {
1784                if (rq1 < rq2) {
1785                        raw_spin_lock(&rq1->lock);
1786                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1787                } else {
1788                        raw_spin_lock(&rq2->lock);
1789                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1790                }
1791        }
1792}
1793
1794/*
1795 * double_rq_unlock - safely unlock two runqueues
1796 *
1797 * Note this does not restore interrupts like task_rq_unlock,
1798 * you need to do so manually after calling.
1799 */
1800static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1801        __releases(rq1->lock)
1802        __releases(rq2->lock)
1803{
1804        raw_spin_unlock(&rq1->lock);
1805        if (rq1 != rq2)
1806                raw_spin_unlock(&rq2->lock);
1807        else
1808                __release(rq2->lock);
1809}
1810
1811#endif
1812
1813#ifdef CONFIG_FAIR_GROUP_SCHED
1814static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1815{
1816#ifdef CONFIG_SMP
1817        cfs_rq->shares = shares;
1818#endif
1819}
1820#endif
1821
1822static void calc_load_account_idle(struct rq *this_rq);
1823static void update_sysctl(void);
1824static int get_update_sysctl_factor(void);
1825static void update_cpu_load(struct rq *this_rq);
1826
1827static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1828{
1829        set_task_rq(p, cpu);
1830#ifdef CONFIG_SMP
1831        /*
1832         * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1833         * successfully executed on another CPU. We must ensure that updates of
1834         * per-task data have been completed by this moment.
1835         */
1836        smp_wmb();
1837        task_thread_info(p)->cpu = cpu;
1838#endif
1839}
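
/*
 * Hedged illustration (not from the original source): the smp_wmb()
 * above is a publish pattern.  A racing reader that observes the new
 * ->cpu value is then guaranteed to also observe the preceding
 * per-task updates:
 *
 *	writer (this function)		reader (e.g. task_rq_lock path)
 *	----------------------		-------------------------------
 *	update per-task data		cpu = task_thread_info(p)->cpu
 *	smp_wmb()			rq  = cpu_rq(cpu)
 *	task_thread_info(p)->cpu = cpu	raw_spin_lock(&rq->lock) + recheck
 */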
1840
1841static const struct sched_class rt_sched_class;
1842
1843#define sched_class_highest (&rt_sched_class)
1844#define for_each_class(class) \
1845   for (class = sched_class_highest; class; class = class->next)
1846
1847#include "sched_stats.h"
1848
1849static void inc_nr_running(struct rq *rq)
1850{
1851        rq->nr_running++;
1852}
1853
1854static void dec_nr_running(struct rq *rq)
1855{
1856        rq->nr_running--;
1857}
1858
1859static void set_load_weight(struct task_struct *p)
1860{
1861        if (task_has_rt_policy(p)) {
1862                p->se.load.weight = 0;
1863                p->se.load.inv_weight = WMULT_CONST;
1864                return;
1865        }
1866
1867        /*
1868         * SCHED_IDLE tasks get minimal weight:
1869         */
1870        if (p->policy == SCHED_IDLE) {
1871                p->se.load.weight = WEIGHT_IDLEPRIO;
1872                p->se.load.inv_weight = WMULT_IDLEPRIO;
1873                return;
1874        }
1875
1876        p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
1877        p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
1878}
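
/*
 * Hedged sketch (illustration only): prio_to_weight[], indexed above,
 * is built so that adjacent nice levels differ by a factor of ~1.25
 * around the nice-0 weight of 1024.  approx_weight() is a
 * hypothetical integer approximation of that table, not kernel code:
 */
static unsigned long approx_weight(int nice)
{
	unsigned long w = 1024;	/* weight of nice 0 */
	int i;

	for (i = 0; i < nice; i++)	/* each +1 nice: ~ /1.25, i.e. *4/5 */
		w = w * 4 / 5;
	for (i = 0; i > nice; i--)	/* each -1 nice: ~ *1.25, i.e. *5/4 */
		w = w * 5 / 4;
	return w;	/* approx_weight(-1) ~= 1280 vs the table's 1277 */
}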
1879
1880static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1881{
1882        update_rq_clock(rq);
1883        sched_info_queued(p);
1884        p->sched_class->enqueue_task(rq, p, flags);
1885        p->se.on_rq = 1;
1886}
1887
1888static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1889{
1890        update_rq_clock(rq);
1891        sched_info_dequeued(p);
1892        p->sched_class->dequeue_task(rq, p, flags);
1893        p->se.on_rq = 0;
1894}
1895
1896/*
1897 * activate_task - move a task to the runqueue.
1898 */
1899static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1900{
1901        if (task_contributes_to_load(p))
1902                rq->nr_uninterruptible--;
1903
1904        enqueue_task(rq, p, flags);
1905        inc_nr_running(rq);
1906}
1907
1908/*
1909 * deactivate_task - remove a task from the runqueue.
1910 */
1911static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1912{
1913        if (task_contributes_to_load(p))
1914                rq->nr_uninterruptible++;
1915
1916        dequeue_task(rq, p, flags);
1917        dec_nr_running(rq);
1918}
1919
1920#include "sched_idletask.c"
1921#include "sched_fair.c"
1922#include "sched_rt.c"
1923#ifdef CONFIG_SCHED_DEBUG
1924# include "sched_debug.c"
1925#endif
1926
1927/*
1928 * __normal_prio - return the priority that is based on the static prio
1929 */
1930static inline int __normal_prio(struct task_struct *p)
1931{
1932        return p->static_prio;
1933}
1934
1935/*
1936 * Calculate the expected normal priority: i.e. priority
1937 * without taking RT-inheritance into account. Might be
1938 * boosted by interactivity modifiers. Changes upon fork,
1939 * setprio syscalls, and whenever the interactivity
1940 * estimator recalculates.
1941 */
1942static inline int normal_prio(struct task_struct *p)
1943{
1944        int prio;
1945
1946        if (task_has_rt_policy(p))
1947                prio = MAX_RT_PRIO-1 - p->rt_priority;
1948        else
1949                prio = __normal_prio(p);
1950        return prio;
1951}
1952
1953/*
1954 * Calculate the current priority, i.e. the priority
1955 * taken into account by the scheduler. This value might
1956 * be boosted by RT tasks, or might be boosted by
1957 * interactivity modifiers. Will be RT if the task got
1958 * RT-boosted. If not then it returns p->normal_prio.
1959 */
1960static int effective_prio(struct task_struct *p)
1961{
1962        p->normal_prio = normal_prio(p);
1963        /*
1964         * If we are RT tasks or we were boosted to RT priority,
1965         * keep the priority unchanged. Otherwise, update priority
1966         * to the normal priority:
1967         */
1968        if (!rt_prio(p->prio))
1969                return p->normal_prio;
1970        return p->prio;
1971}
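
/*
 * Hedged worked example against the standard 0..139 priority range:
 * a SCHED_FIFO task with rt_priority 50 gets normal_prio() ==
 * MAX_RT_PRIO-1 - 50 == 49, while a SCHED_NORMAL task at nice 0
 * keeps static_prio 120.  A PI-boosted SCHED_NORMAL task keeps its
 * boosted RT p->prio in effective_prio() until it is deboosted.
 */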
1972
1973/**
1974 * task_curr - is this task currently executing on a CPU?
1975 * @p: the task in question.
1976 */
1977inline int task_curr(const struct task_struct *p)
1978{
1979        return cpu_curr(task_cpu(p)) == p;
1980}
1981
1982static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1983                                       const struct sched_class *prev_class,
1984                                       int oldprio, int running)
1985{
1986        if (prev_class != p->sched_class) {
1987                if (prev_class->switched_from)
1988                        prev_class->switched_from(rq, p, running);
1989                p->sched_class->switched_to(rq, p, running);
1990        } else
1991                p->sched_class->prio_changed(rq, p, oldprio, running);
1992}
1993
1994#ifdef CONFIG_SMP
1995/*
1996 * Is this task likely cache-hot:
1997 */
1998static int
1999task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2000{
2001        s64 delta;
2002
2003        if (p->sched_class != &fair_sched_class)
2004                return 0;
2005
2006        /*
2007         * Buddy candidates are cache hot:
2008         */
2009        if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2010                        (&p->se == cfs_rq_of(&p->se)->next ||
2011                         &p->se == cfs_rq_of(&p->se)->last))
2012                return 1;
2013
2014        if (sysctl_sched_migration_cost == -1)
2015                return 1;
2016        if (sysctl_sched_migration_cost == 0)
2017                return 0;
2018
2019        delta = now - p->se.exec_start;
2020
2021        return delta < (s64)sysctl_sched_migration_cost;
2022}
2023
2024void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2025{
2026#ifdef CONFIG_SCHED_DEBUG
2027        /*
2028         * We should never call set_task_cpu() on a blocked task,
2029         * ttwu() will sort out the placement.
2030         */
2031        WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2032                        !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2033#endif
2034
2035        trace_sched_migrate_task(p, new_cpu);
2036
2037        if (task_cpu(p) != new_cpu) {
2038                p->se.nr_migrations++;
2039                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2040        }
2041
2042        __set_task_cpu(p, new_cpu);
2043}
2044
2045struct migration_arg {
2046        struct task_struct *task;
2047        int dest_cpu;
2048};
2049
2050static int migration_cpu_stop(void *data);
2051
2052/*
2053 * The task's runqueue lock must be held.
2054 * Returns true if you have to wait for the migration thread.
2055 */
2056static bool migrate_task(struct task_struct *p, int dest_cpu)
2057{
2058        struct rq *rq = task_rq(p);
2059
2060        /*
2061         * If the task is not on a runqueue (and not running), then
2062         * the next wake-up will properly place the task.
2063         */
2064        return p->se.on_rq || task_running(rq, p);
2065}
2066
2067/*
2068 * wait_task_inactive - wait for a thread to unschedule.
2069 *
2070 * If @match_state is nonzero, it's the @p->state value just checked and
2071 * not expected to change.  If it changes, i.e. @p might have woken up,
2072 * then return zero.  When we succeed in waiting for @p to be off its CPU,
2073 * we return a positive number (its total switch count).  If a second call
2074 * a short while later returns the same number, the caller can be sure that
2075 * @p has remained unscheduled the whole time.
2076 *
2077 * The caller must ensure that the task *will* unschedule sometime soon,
2078 * else this function might spin for a *long* time. This function can't
2079 * be called with interrupts off, or it may introduce deadlock with
2080 * smp_call_function() if an IPI is sent by the same process we are
2081 * waiting to become inactive.
2082 */
2083unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2084{
2085        unsigned long flags;
2086        int running, on_rq;
2087        unsigned long ncsw;
2088        struct rq *rq;
2089
2090        for (;;) {
2091                /*
2092                 * We do the initial early heuristics without holding
2093                 * any task-queue locks at all. We'll only try to get
2094                 * the runqueue lock when things look like they will
2095                 * work out!
2096                 */
2097                rq = task_rq(p);
2098
2099                /*
2100                 * If the task is actively running on another CPU
2101                 * still, just relax and busy-wait without holding
2102                 * any locks.
2103                 *
2104                 * NOTE! Since we don't hold any locks, it's not
2105                 * even certain that "rq" stays the right runqueue!
2106                 * But we don't care, since "task_running()" will
2107                 * return false if the runqueue has changed and p
2108                 * is actually now running somewhere else!
2109                 */
2110                while (task_running(rq, p)) {
2111                        if (match_state && unlikely(p->state != match_state))
2112                                return 0;
2113                        cpu_relax();
2114                }
2115
2116                /*
2117                 * Ok, time to look more closely! We need the rq
2118                 * lock now, to be *sure*. If we're wrong, we'll
2119                 * just go back and repeat.
2120                 */
2121                rq = task_rq_lock(p, &flags);
2122                trace_sched_wait_task(p);
2123                running = task_running(rq, p);
2124                on_rq = p->se.on_rq;
2125                ncsw = 0;
2126                if (!match_state || p->state == match_state)
2127                        ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2128                task_rq_unlock(rq, &flags);
2129
2130                /*
2131                 * If it changed from the expected state, bail out now.
2132                 */
2133                if (unlikely(!ncsw))
2134                        break;
2135
2136                /*
2137                 * Was it really running after all now that we
2138                 * checked with the proper locks actually held?
2139                 *
2140                 * Oops. Go back and try again..
2141                 */
2142                if (unlikely(running)) {
2143                        cpu_relax();
2144                        continue;
2145                }
2146
2147                /*
2148                 * It's not enough that it's not actively running,
2149                 * it must be off the runqueue _entirely_, and not
2150                 * preempted!
2151                 *
2152                 * So if it was still runnable (but just not actively
2153                 * running right now), it's preempted, and we should
2154                 * yield - it could be a while.
2155                 */
2156                if (unlikely(on_rq)) {
2157                        schedule_timeout_uninterruptible(1);
2158                        continue;
2159                }
2160
2161                /*
2162                 * Ahh, all good. It wasn't running, and it wasn't
2163                 * runnable, which means that it will never become
2164                 * running in the future either. We're all done!
2165                 */
2166                break;
2167        }
2168
2169        return ncsw;
2170}
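
/*
 * Hedged usage sketch (hypothetical helper, not part of this file):
 * the switch-count cookie returned above lets a caller verify that
 * @p stayed off its CPU across some critical work.
 */
static bool stayed_inactive_example(struct task_struct *p, long state)
{
	unsigned long ncsw = wait_task_inactive(p, state);

	if (!ncsw)	/* @p changed state, or woke up */
		return false;
	/* ... do work that must not race with @p running ... */
	return wait_task_inactive(p, state) == ncsw;
}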
2171
2172/***
2173 * kick_process - kick a running thread to enter/exit the kernel
2174 * @p: the to-be-kicked thread
2175 *
2176 * Cause a process which is running on another CPU to enter
2177 * kernel mode, without any delay (to get signals handled).
2178 *
2179 * NOTE: this function doesn't have to take the runqueue lock,
2180 * because all it wants to ensure is that the remote task enters
2181 * the kernel. If the IPI races and the task has been migrated
2182 * to another CPU then no harm is done and the purpose has been
2183 * achieved as well.
2184 */
2185void kick_process(struct task_struct *p)
2186{
2187        int cpu;
2188
2189        preempt_disable();
2190        cpu = task_cpu(p);
2191        if ((cpu != smp_processor_id()) && task_curr(p))
2192                smp_send_reschedule(cpu);
2193        preempt_enable();
2194}
2195EXPORT_SYMBOL_GPL(kick_process);
2196#endif /* CONFIG_SMP */
2197
2198/**
2199 * task_oncpu_function_call - call a function on the cpu on which a task runs
2200 * @p:          the task to evaluate
2201 * @func:       the function to be called
2202 * @info:       the function call argument
2203 *
2204 * Calls the function @func when the task is currently running. This might
2205 * be on the current CPU, in which case the function is called directly.
2206 */
2207void task_oncpu_function_call(struct task_struct *p,
2208                              void (*func) (void *info), void *info)
2209{
2210        int cpu;
2211
2212        preempt_disable();
2213        cpu = task_cpu(p);
2214        if (task_curr(p))
2215                smp_call_function_single(cpu, func, info, 1);
2216        preempt_enable();
2217}
2218
2219#ifdef CONFIG_SMP
2220/*
2221 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2222 */
2223static int select_fallback_rq(int cpu, struct task_struct *p)
2224{
2225        int dest_cpu;
2226        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2227
2228        /* Look for allowed, online CPU in same node. */
2229        for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2230                if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2231                        return dest_cpu;
2232
2233        /* Any allowed, online CPU? */
2234        dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2235        if (dest_cpu < nr_cpu_ids)
2236                return dest_cpu;
2237
2238        /* No more Mr. Nice Guy. */
2239        if (unlikely(dest_cpu >= nr_cpu_ids)) {
2240                dest_cpu = cpuset_cpus_allowed_fallback(p);
2241                /*
2242                 * Don't tell them about moving exiting tasks or
2243                 * kernel threads (both mm NULL), since they never
2244                 * leave kernel.
2245         * leave the kernel.
2246                if (p->mm && printk_ratelimit()) {
2247                        printk(KERN_INFO "process %d (%s) no "
2248                               "longer affine to cpu%d\n",
2249                               task_pid_nr(p), p->comm, cpu);
2250                }
2251        }
2252
2253        return dest_cpu;
2254}
2255
2256/*
2257 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
2258 */
2259static inline
2260int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
2261{
2262        int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
2263
2264        /*
2265         * In order not to call set_task_cpu() on a blocking task we need
2266         * to rely on ttwu() to place the task on a valid ->cpus_allowed
2267         * cpu.
2268         *
2269         * Since this is common to all placement strategies, this lives here.
2270         *
2271         * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2272         *   not worry about this generic constraint ]
2273         */
2274        if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2275                     !cpu_online(cpu)))
2276                cpu = select_fallback_rq(task_cpu(p), p);
2277
2278        return cpu;
2279}
2280
2281static void update_avg(u64 *avg, u64 sample)
2282{
2283        s64 diff = sample - *avg;
2284        *avg += diff >> 3;
2285}
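
/*
 * Hedged worked example: update_avg() is an EWMA with gain 1/8.  With
 * *avg == 1000 and sample == 1800, diff == 800 and *avg becomes 1100;
 * repeated identical samples converge on 1800 geometrically.
 */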
2286#endif
2287
2288static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
2289                                 bool is_sync, bool is_migrate, bool is_local,
2290                                 unsigned long en_flags)
2291{
2292        schedstat_inc(p, se.statistics.nr_wakeups);
2293        if (is_sync)
2294                schedstat_inc(p, se.statistics.nr_wakeups_sync);
2295        if (is_migrate)
2296                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2297        if (is_local)
2298                schedstat_inc(p, se.statistics.nr_wakeups_local);
2299        else
2300                schedstat_inc(p, se.statistics.nr_wakeups_remote);
2301
2302        activate_task(rq, p, en_flags);
2303}
2304
2305static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2306                                        int wake_flags, bool success)
2307{
2308        trace_sched_wakeup(p, success);
2309        check_preempt_curr(rq, p, wake_flags);
2310
2311        p->state = TASK_RUNNING;
2312#ifdef CONFIG_SMP
2313        if (p->sched_class->task_woken)
2314                p->sched_class->task_woken(rq, p);
2315
2316        if (unlikely(rq->idle_stamp)) {
2317                u64 delta = rq->clock - rq->idle_stamp;
2318                u64 max = 2*sysctl_sched_migration_cost;
2319
2320                if (delta > max)
2321                        rq->avg_idle = max;
2322                else
2323                        update_avg(&rq->avg_idle, delta);
2324                rq->idle_stamp = 0;
2325        }
2326#endif
2327        /* if a worker is waking up, notify workqueue */
2328        if ((p->flags & PF_WQ_WORKER) && success)
2329                wq_worker_waking_up(p, cpu_of(rq));
2330}
2331
2332/**
2333 * try_to_wake_up - wake up a thread
2334 * @p: the thread to be awakened
2335 * @state: the mask of task states that can be woken
2336 * @wake_flags: wake modifier flags (WF_*)
2337 *
2338 * Put it on the run-queue if it's not already there. The "current"
2339 * thread is always on the run-queue (except when the actual
2340 * re-schedule is in progress), and as such you're allowed to do
2341 * the simpler "current->state = TASK_RUNNING" to mark yourself
2342 * runnable without the overhead of this.
2343 *
2344 * Returns %true if @p was woken up, %false if it was already running
2345 * or @state didn't match @p's state.
2346 */
2347static int try_to_wake_up(struct task_struct *p, unsigned int state,
2348                          int wake_flags)
2349{
2350        int cpu, orig_cpu, this_cpu, success = 0;
2351        unsigned long flags;
2352        unsigned long en_flags = ENQUEUE_WAKEUP;
2353        struct rq *rq;
2354
2355        this_cpu = get_cpu();
2356
2357        smp_wmb();
2358        rq = task_rq_lock(p, &flags);
2359        if (!(p->state & state))
2360                goto out;
2361
2362        if (p->se.on_rq)
2363                goto out_running;
2364
2365        cpu = task_cpu(p);
2366        orig_cpu = cpu;
2367
2368#ifdef CONFIG_SMP
2369        if (unlikely(task_running(rq, p)))
2370                goto out_activate;
2371
2372        /*
2373         * In order to handle concurrent wakeups and release the rq->lock
2374         * we put the task in TASK_WAKING state.
2375         *
2376         * First fix up the nr_uninterruptible count:
2377         */
2378        if (task_contributes_to_load(p)) {
2379                if (likely(cpu_online(orig_cpu)))
2380                        rq->nr_uninterruptible--;
2381                else
2382                        this_rq()->nr_uninterruptible--;
2383        }
2384        p->state = TASK_WAKING;
2385
2386        if (p->sched_class->task_waking) {
2387                p->sched_class->task_waking(rq, p);
2388                en_flags |= ENQUEUE_WAKING;
2389        }
2390
2391        cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2392        if (cpu != orig_cpu)
2393                set_task_cpu(p, cpu);
2394        __task_rq_unlock(rq);
2395
2396        rq = cpu_rq(cpu);
2397        raw_spin_lock(&rq->lock);
2398
2399        /*
2400         * We migrated the task without holding either rq->lock; however,
2401         * since the task is not on the task list itself, nobody else
2402         * will try to migrate the task, hence the rq should match the
2403         * cpu we just moved it to.
2404         */
2405        WARN_ON(task_cpu(p) != cpu);
2406        WARN_ON(p->state != TASK_WAKING);
2407
2408#ifdef CONFIG_SCHEDSTATS
2409        schedstat_inc(rq, ttwu_count);
2410        if (cpu == this_cpu)
2411                schedstat_inc(rq, ttwu_local);
2412        else {
2413                struct sched_domain *sd;
2414                for_each_domain(this_cpu, sd) {
2415                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2416                                schedstat_inc(sd, ttwu_wake_remote);
2417                                break;
2418                        }
2419                }
2420        }
2421#endif /* CONFIG_SCHEDSTATS */
2422
2423out_activate:
2424#endif /* CONFIG_SMP */
2425        ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
2426                      cpu == this_cpu, en_flags);
2427        success = 1;
2428out_running:
2429        ttwu_post_activation(p, rq, wake_flags, success);
2430out:
2431        task_rq_unlock(rq, &flags);
2432        put_cpu();
2433
2434        return success;
2435}
2436
2437/**
2438 * try_to_wake_up_local - try to wake up a local task with rq lock held
2439 * @p: the thread to be awakened
2440 *
2441 * Put @p on the run-queue if it's not already there.  The caller must
2442 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2443 * the current task.  this_rq() stays locked over invocation.
2444 */
2445static void try_to_wake_up_local(struct task_struct *p)
2446{
2447        struct rq *rq = task_rq(p);
2448        bool success = false;
2449
2450        BUG_ON(rq != this_rq());
2451        BUG_ON(p == current);
2452        lockdep_assert_held(&rq->lock);
2453
2454        if (!(p->state & TASK_NORMAL))
2455                return;
2456
2457        if (!p->se.on_rq) {
2458                if (likely(!task_running(rq, p))) {
2459                        schedstat_inc(rq, ttwu_count);
2460                        schedstat_inc(rq, ttwu_local);
2461                }
2462                ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
2463                success = true;
2464        }
2465        ttwu_post_activation(p, rq, 0, success);
2466}
2467
2468/**
2469 * wake_up_process - Wake up a specific process
2470 * @p: The process to be woken up.
2471 *
2472 * Attempt to wake up the nominated process and move it to the set of runnable
2473 * processes.  Returns 1 if the process was woken up, 0 if it was already
2474 * running.
2475 *
2476 * It may be assumed that this function implies a write memory barrier before
2477 * changing the task state if and only if any tasks are woken up.
2478 */
2479int wake_up_process(struct task_struct *p)
2480{
2481        return try_to_wake_up(p, TASK_ALL, 0);
2482}
2483EXPORT_SYMBOL(wake_up_process);
2484
2485int wake_up_state(struct task_struct *p, unsigned int state)
2486{
2487        return try_to_wake_up(p, state, 0);
2488}
2489
2490/*
2491 * Perform scheduler related setup for a newly forked process p.
2492 * p is forked by current.
2493 *
2494 * __sched_fork() is basic setup used by init_idle() too:
2495 */
2496static void __sched_fork(struct task_struct *p)
2497{
2498        p->se.exec_start                = 0;
2499        p->se.sum_exec_runtime          = 0;
2500        p->se.prev_sum_exec_runtime     = 0;
2501        p->se.nr_migrations             = 0;
2502
2503#ifdef CONFIG_SCHEDSTATS
2504        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2505#endif
2506
2507        INIT_LIST_HEAD(&p->rt.run_list);
2508        p->se.on_rq = 0;
2509        INIT_LIST_HEAD(&p->se.group_node);
2510
2511#ifdef CONFIG_PREEMPT_NOTIFIERS
2512        INIT_HLIST_HEAD(&p->preempt_notifiers);
2513#endif
2514}
2515
2516/*
2517 * fork()/clone()-time setup:
2518 */
2519void sched_fork(struct task_struct *p, int clone_flags)
2520{
2521        int cpu = get_cpu();
2522
2523        __sched_fork(p);
2524        /*
2525         * We mark the process as running here. This guarantees that
2526         * nobody will actually run it, and a signal or other external
2527         * event cannot wake it up and insert it on the runqueue either.
2528         */
2529        p->state = TASK_RUNNING;
2530
2531        /*
2532         * Revert to default priority/policy on fork if requested.
2533         */
2534        if (unlikely(p->sched_reset_on_fork)) {
2535                if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
2536                        p->policy = SCHED_NORMAL;
2537                        p->normal_prio = p->static_prio;
2538                }
2539
2540                if (PRIO_TO_NICE(p->static_prio) < 0) {
2541                        p->static_prio = NICE_TO_PRIO(0);
2542                        p->normal_prio = p->static_prio;
2543                        set_load_weight(p);
2544                }
2545
2546                /*
2547                 * We don't need the reset flag anymore after the fork. It has
2548                 * fulfilled its duty:
2549                 */
2550                p->sched_reset_on_fork = 0;
2551        }
2552
2553        /*
2554         * Make sure we do not leak PI boosting priority to the child.
2555         */
2556        p->prio = current->normal_prio;
2557
2558        if (!rt_prio(p->prio))
2559                p->sched_class = &fair_sched_class;
2560
2561        if (p->sched_class->task_fork)
2562                p->sched_class->task_fork(p);
2563
2564        /*
2565         * The child is not yet in the pid-hash so no cgroup attach races,
2566         * and the cgroup is pinned to this child because cgroup_fork()
2567         * is run before sched_fork().
2568         *
2569         * Silence PROVE_RCU.
2570         */
2571        rcu_read_lock();
2572        set_task_cpu(p, cpu);
2573        rcu_read_unlock();
2574
2575#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2576        if (likely(sched_info_on()))
2577                memset(&p->sched_info, 0, sizeof(p->sched_info));
2578#endif
2579#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
2580        p->oncpu = 0;
2581#endif
2582#ifdef CONFIG_PREEMPT
2583        /* Want to start with kernel preemption disabled. */
2584        task_thread_info(p)->preempt_count = 1;
2585#endif
2586        plist_node_init(&p->pushable_tasks, MAX_PRIO);
2587
2588        put_cpu();
2589}
2590
2591/*
2592 * wake_up_new_task - wake up a newly created task for the first time.
2593 *
2594 * This function will do some initial scheduler statistics housekeeping
2595 * that must be done for every newly created context, then puts the task
2596 * on the runqueue and wakes it.
2597 */
2598void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2599{
2600        unsigned long flags;
2601        struct rq *rq;
2602        int cpu __maybe_unused = get_cpu();
2603
2604#ifdef CONFIG_SMP
2605        rq = task_rq_lock(p, &flags);
2606        p->state = TASK_WAKING;
2607
2608        /*
2609         * Fork balancing, do it here and not earlier because:
2610         *  - cpus_allowed can change in the fork path
2611         *  - any previously selected cpu might disappear through hotplug
2612         *
2613         * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2614         * without people poking at ->cpus_allowed.
2615         */
2616        cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
2617        set_task_cpu(p, cpu);
2618
2619        p->state = TASK_RUNNING;
2620        task_rq_unlock(rq, &flags);
2621#endif
2622
2623        rq = task_rq_lock(p, &flags);
2624        activate_task(rq, p, 0);
2625        trace_sched_wakeup_new(p, 1);
2626        check_preempt_curr(rq, p, WF_FORK);
2627#ifdef CONFIG_SMP
2628        if (p->sched_class->task_woken)
2629                p->sched_class->task_woken(rq, p);
2630#endif
2631        task_rq_unlock(rq, &flags);
2632        put_cpu();
2633}
2634
2635#ifdef CONFIG_PREEMPT_NOTIFIERS
2636
2637/**
2638 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2639 * @notifier: notifier struct to register
2640 */
2641void preempt_notifier_register(struct preempt_notifier *notifier)
2642{
2643        hlist_add_head(&notifier->link, &current->preempt_notifiers);
2644}
2645EXPORT_SYMBOL_GPL(preempt_notifier_register);
2646
2647/**
2648 * preempt_notifier_unregister - no longer interested in preemption notifications
2649 * @notifier: notifier struct to unregister
2650 *
2651 * This is safe to call from within a preemption notifier.
2652 */
2653void preempt_notifier_unregister(struct preempt_notifier *notifier)
2654{
2655        hlist_del(&notifier->link);
2656}
2657EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
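
/*
 * Hedged usage sketch, modelled on how KVM consumes this API (the
 * my_* names are hypothetical).  Assumes struct preempt_ops and
 * preempt_notifier_init() from <linux/preempt.h>:
 */
static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current was just scheduled in on @cpu */
}

static void my_sched_out(struct preempt_notifier *pn,
			 struct task_struct *next)
{
	/* current is being scheduled out in favour of @next */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static struct preempt_notifier my_notifier;

static void my_watch_current(void)
{
	preempt_notifier_init(&my_notifier, &my_preempt_ops);
	preempt_notifier_register(&my_notifier);	/* tracks current only */
}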
2658
2659static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2660{
2661        struct preempt_notifier *notifier;
2662        struct hlist_node *node;
2663
2664        hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2665                notifier->ops->sched_in(notifier, raw_smp_processor_id());
2666}
2667
2668static void
2669fire_sched_out_preempt_notifiers(struct task_struct *curr,
2670                                 struct task_struct *next)
2671{
2672        struct preempt_notifier *notifier;
2673        struct hlist_node *node;
2674
2675        hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2676                notifier->ops->sched_out(notifier, next);
2677}
2678
2679#else /* !CONFIG_PREEMPT_NOTIFIERS */
2680
2681static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2682{
2683}
2684
2685static void
2686fire_sched_out_preempt_notifiers(struct task_struct *curr,
2687                                 struct task_struct *next)
2688{
2689}
2690
2691#endif /* CONFIG_PREEMPT_NOTIFIERS */
2692
2693/**
2694 * prepare_task_switch - prepare to switch tasks
2695 * @rq: the runqueue preparing to switch
2696 * @prev: the current task that is being switched out
2697 * @next: the task we are going to switch to.
2698 *
2699 * This is called with the rq lock held and interrupts off. It must
2700 * be paired with a subsequent finish_task_switch after the context
2701 * switch.
2702 *
2703 * prepare_task_switch sets up locking and calls architecture specific
2704 * hooks.
2705 */
2706static inline void
2707prepare_task_switch(struct rq *rq, struct task_struct *prev,
2708                    struct task_struct *next)
2709{
2710        fire_sched_out_preempt_notifiers(prev, next);
2711        prepare_lock_switch(rq, next);
2712        prepare_arch_switch(next);
2713}
2714
2715/**
2716 * finish_task_switch - clean up after a task-switch
2717 * @rq: runqueue associated with task-switch
2718 * @prev: the thread we just switched away from.
2719 *
2720 * finish_task_switch must be called after the context switch, paired
2721 * with a prepare_task_switch call before the context switch.
2722 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2723 * and do any other architecture-specific cleanup actions.
2724 *
2725 * Note that we may have delayed dropping an mm in context_switch(). If
2726 * so, we finish that here outside of the runqueue lock. (Doing it
2727 * with the lock held can cause deadlocks; see schedule() for
2728 * details.)
2729 */
2730static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2731        __releases(rq->lock)
2732{
2733        struct mm_struct *mm = rq->prev_mm;
2734        long prev_state;
2735
2736        rq->prev_mm = NULL;
2737
2738        /*
2739         * A task struct has one reference for the use as "current".
2740         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2741         * schedule one last time. The schedule call will never return, and
2742         * the scheduled task must drop that reference.
2743         * The test for TASK_DEAD must occur while the runqueue locks are
2744         * still held, otherwise prev could be scheduled on another cpu, die
2745         * there before we look at prev->state, and then the reference would
2746         * be dropped twice.
2747         *              Manfred Spraul <manfred@colorfullife.com>
2748         */
2749        prev_state = prev->state;
2750        finish_arch_switch(prev);
2751#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2752        local_irq_disable();
2753#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2754        perf_event_task_sched_in(current);
2755#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2756        local_irq_enable();
2757#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2758        finish_lock_switch(rq, prev);
2759
2760        fire_sched_in_preempt_notifiers(current);
2761        if (mm)
2762                mmdrop(mm);
2763        if (unlikely(prev_state == TASK_DEAD)) {
2764                /*
2765                 * Remove function-return probe instances associated with this
2766                 * task and put them back on the free list.
2767                 */
2768                kprobe_flush_task(prev);
2769                put_task_struct(prev);
2770        }
2771}
2772
2773#ifdef CONFIG_SMP
2774
2775/* assumes rq->lock is held */
2776static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2777{
2778        if (prev->sched_class->pre_schedule)
2779                prev->sched_class->pre_schedule(rq, prev);
2780}
2781
2782/* rq->lock is NOT held, but preemption is disabled */
2783static inline void post_schedule(struct rq *rq)
2784{
2785        if (rq->post_schedule) {
2786                unsigned long flags;
2787
2788                raw_spin_lock_irqsave(&rq->lock, flags);
2789                if (rq->curr->sched_class->post_schedule)
2790                        rq->curr->sched_class->post_schedule(rq);
2791                raw_spin_unlock_irqrestore(&rq->lock, flags);
2792
2793                rq->post_schedule = 0;
2794        }
2795}
2796
2797#else
2798
2799static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2800{
2801}
2802
2803static inline void post_schedule(struct rq *rq)
2804{
2805}
2806
2807#endif
2808
2809/**
2810 * schedule_tail - first thing a freshly forked thread must call.
2811 * @prev: the thread we just switched away from.
2812 */
2813asmlinkage void schedule_tail(struct task_struct *prev)
2814        __releases(rq->lock)
2815{
2816        struct rq *rq = this_rq();
2817
2818        finish_task_switch(rq, prev);
2819
2820        /*
2821         * FIXME: do we need to worry about rq being invalidated by the
2822         * task_switch?
2823         */
2824        post_schedule(rq);
2825
2826#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2827        /* In this case, finish_task_switch does not reenable preemption */
2828        preempt_enable();
2829#endif
2830        if (current->set_child_tid)
2831                put_user(task_pid_vnr(current), current->set_child_tid);
2832}
2833
2834/*
2835 * context_switch - switch to the new MM and the new
2836 * thread's register state.
2837 */
2838static inline void
2839context_switch(struct rq *rq, struct task_struct *prev,
2840               struct task_struct *next)
2841{
2842        struct mm_struct *mm, *oldmm;
2843
2844        prepare_task_switch(rq, prev, next);
2845        trace_sched_switch(prev, next);
2846        mm = next->mm;
2847        oldmm = prev->active_mm;
2848        /*
2849         * For paravirt, this is coupled with an exit in switch_to to
2850         * combine the page table reload and the switch backend into
2851         * one hypercall.
2852         */
2853        arch_start_context_switch(prev);
2854
2855        if (likely(!mm)) {
2856                next->active_mm = oldmm;
2857                atomic_inc(&oldmm->mm_count);
2858                enter_lazy_tlb(oldmm, next);
2859        } else
2860                switch_mm(oldmm, mm, next);
2861
2862        if (likely(!prev->mm)) {
2863                prev->active_mm = NULL;
2864                rq->prev_mm = oldmm;
2865        }
2866        /*
2867         * The runqueue lock will be released by the next
2868         * task (which is an invalid locking op but in the case
2869         * of the scheduler it's an obvious special-case), so we
2870         * do an early lockdep release here:
2871         */
2872#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2873        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2874#endif
2875
2876        /* Here we just switch the register state and the stack. */
2877        switch_to(prev, next, prev);
2878
2879        barrier();
2880        /*
2881         * this_rq must be evaluated again because prev may have moved
2882         * CPUs since it called schedule(), thus the 'rq' on its stack
2883         * frame will be invalid.
2884         */
2885        finish_task_switch(this_rq(), prev);
2886}
2887
2888/*
2889 * nr_running, nr_uninterruptible and nr_context_switches:
2890 *
2891 * externally visible scheduler statistics: current number of runnable
2892 * threads, current number of uninterruptible-sleeping threads, total
2893 * number of context switches performed since bootup.
2894 */
2895unsigned long nr_running(void)
2896{
2897        unsigned long i, sum = 0;
2898
2899        for_each_online_cpu(i)
2900                sum += cpu_rq(i)->nr_running;
2901
2902        return sum;
2903}
2904
2905unsigned long nr_uninterruptible(void)
2906{
2907        unsigned long i, sum = 0;
2908
2909        for_each_possible_cpu(i)
2910                sum += cpu_rq(i)->nr_uninterruptible;
2911
2912        /*
2913         * Since we read the counters lockless, it might be slightly
2914         * inaccurate. Do not allow it to go below zero though:
2915         */
2916        if (unlikely((long)sum < 0))
2917                sum = 0;
2918
2919        return sum;
2920}
2921
2922unsigned long long nr_context_switches(void)
2923{
2924        int i;
2925        unsigned long long sum = 0;
2926
2927        for_each_possible_cpu(i)
2928                sum += cpu_rq(i)->nr_switches;
2929
2930        return sum;
2931}
2932
2933unsigned long nr_iowait(void)
2934{
2935        unsigned long i, sum = 0;
2936
2937        for_each_possible_cpu(i)
2938                sum += atomic_read(&cpu_rq(i)->nr_iowait);
2939
2940        return sum;
2941}
2942
2943unsigned long nr_iowait_cpu(int cpu)
2944{
2945        struct rq *this = cpu_rq(cpu);
2946        return atomic_read(&this->nr_iowait);
2947}
2948
2949unsigned long this_cpu_load(void)
2950{
2951        struct rq *this = this_rq();
2952        return this->cpu_load[0];
2953}
2954
2955
2956/* Variables and functions for calc_load */
2957static atomic_long_t calc_load_tasks;
2958static unsigned long calc_load_update;
2959unsigned long avenrun[3];
2960EXPORT_SYMBOL(avenrun);
2961
2962static long calc_load_fold_active(struct rq *this_rq)
2963{
2964        long nr_active, delta = 0;
2965
2966        nr_active = this_rq->nr_running;
2967        nr_active += (long) this_rq->nr_uninterruptible;
2968
2969        if (nr_active != this_rq->calc_load_active) {
2970                delta = nr_active - this_rq->calc_load_active;
2971                this_rq->calc_load_active = nr_active;
2972        }
2973
2974        return delta;
2975}
2976
2977#ifdef CONFIG_NO_HZ
2978/*
2979 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2980 *
2981 * When making the ILB scale, we should try to pull this in as well.
2982 */
2983static atomic_long_t calc_load_tasks_idle;
2984
2985static void calc_load_account_idle(struct rq *this_rq)
2986{
2987        long delta;
2988
2989        delta = calc_load_fold_active(this_rq);
2990        if (delta)
2991                atomic_long_add(delta, &calc_load_tasks_idle);
2992}
2993
2994static long calc_load_fold_idle(void)
2995{
2996        long delta = 0;
2997
2998        /*
2999         * It's got a race; we don't care...
3000         */
3001        if (atomic_long_read(&calc_load_tasks_idle))
3002                delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3003
3004        return delta;
3005}
3006#else
3007static void calc_load_account_idle(struct rq *this_rq)
3008{
3009}
3010
3011static inline long calc_load_fold_idle(void)
3012{
3013        return 0;
3014}
3015#endif
3016
3017/**
3018 * get_avenrun - get the load average array
3019 * @loads:      pointer to dest load array
3020 * @offset:     offset to add
3021 * @shift:      shift count to shift the result left
3022 *
3023 * These values are estimates at best, so no need for locking.
3024 */
3025void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3026{
3027        loads[0] = (avenrun[0] + offset) << shift;
3028        loads[1] = (avenrun[1] + offset) << shift;
3029        loads[2] = (avenrun[2] + offset) << shift;
3030}
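
/*
 * Hedged usage sketch, modelled on fs/proc/loadavg.c: LOAD_INT() and
 * LOAD_FRAC() from <linux/sched.h> turn the fixed-point averages into
 * the familiar "0.08 0.02 0.01" form.
 */
static void print_loadavg_example(void)
{
	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1/200, 0);	/* +0.005 for rounding */
	printk(KERN_INFO "load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
	       LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
	       LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
	       LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
}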
3031
3032static unsigned long
3033calc_load(unsigned long load, unsigned long exp, unsigned long active)
3034{
3035        load *= exp;
3036        load += active * (FIXED_1 - exp);
3037        return load >> FSHIFT;
3038}
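
/*
 * Hedged worked example with the usual constants from linux/sched.h
 * (FSHIFT == 11, FIXED_1 == 2048, EXP_1 == 1884): starting from an
 * avenrun[0] of 0 with one runnable task (active == 2048), one step
 * yields (0 * 1884 + 2048 * (2048 - 1884)) >> 11 == 164, i.e. the
 * 1-minute average climbs to 164/2048 ~= 0.08 after the first
 * LOAD_FREQ interval.
 */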
3039
3040/*
3041 * calc_global_load - update the avenrun load estimates 10 ticks after the
3042 * CPUs have updated calc_load_tasks.
3043 */
3044void calc_global_load(void)
3045{
3046        unsigned long upd = calc_load_update + 10;
3047        long active;
3048
3049        if (time_before(jiffies, upd))
3050                return;
3051
3052        active = atomic_long_read(&calc_load_tasks);
3053        active = active > 0 ? active * FIXED_1 : 0;
3054
3055        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3056        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3057        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3058
3059        calc_load_update += LOAD_FREQ;
3060}
3061
3062/*
3063 * Called from update_cpu_load() to periodically update this CPU's
3064 * active count.
3065 */
3066static void calc_load_account_active(struct rq *this_rq)
3067{
3068        long delta;
3069
3070        if (time_before(jiffies, this_rq->calc_load_update))
3071                return;
3072
3073        delta  = calc_load_fold_active(this_rq);
3074        delta += calc_load_fold_idle();
3075        if (delta)
3076                atomic_long_add(delta, &calc_load_tasks);
3077
3078        this_rq->calc_load_update += LOAD_FREQ;
3079}
3080
3081/*
3082 * The exact cpuload at various idx values, calculated at every tick would be
3083 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3084 *
3085 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3086 * on nth tick when cpu may be busy, then we have:
3087 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3088 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3089 *
3090 * decay_load_missed() below does efficient calculation of
3091 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3092 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3093 *
3094 * The calculation is approximated on a 128 point scale.
3095 * degrade_zero_ticks is the number of ticks after which load at any
3096 * particular idx is approximated to be zero.
3097 * degrade_factor is a precomputed table, a row for each load idx.
3098 * Each column corresponds to degradation factor for a power of two ticks,
3099 * based on 128 point scale.
3100 * Example:
3101 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3102 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3103 *
3104 * With this power of 2 load factors, we can degrade the load n times
3105 * by looking at 1 bits in n and doing as many mult/shift instead of
3106 * n mult/shifts needed by the exact degradation.
3107 */
3108#define DEGRADE_SHIFT           7
3109static const unsigned char
3110                degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3111static const unsigned char
3112                degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3113                                        {0, 0, 0, 0, 0, 0, 0, 0},
3114                                        {64, 32, 8, 0, 0, 0, 0, 0},
3115                                        {96, 72, 40, 12, 1, 0, 0, 0},
3116                                        {112, 98, 75, 43, 15, 1, 0, 0},
3117                                        {120, 112, 98, 76, 45, 16, 2, 0} };
3118
3119/*
3120 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3121 * case is when the CPU was idle, so we just decay the old load without
3122 * adding any new load.
3123 */
3124static unsigned long
3125decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3126{
3127        int j = 0;
3128
3129        if (!missed_updates)
3130                return load;
3131
3132        if (missed_updates >= degrade_zero_ticks[idx])
3133                return 0;
3134
3135        if (idx == 1)
3136                return load >> missed_updates;
3137
3138        while (missed_updates) {
3139                if (missed_updates % 2)
3140                        load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3141
3142                missed_updates >>= 1;
3143                j++;
3144        }
3145        return load;
3146}
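
/*
 * Hedged worked example: at idx 2 with 8 missed ticks, the loop hits
 * the single set bit of 8 at j == 3 and scales by degrade_factor[2][3]
 * == 12, so a load of 1000 decays to (1000 * 12) >> 7 == 93, matching
 * the 12/128 factor described above.
 */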
3147
3148/*
3149 * Update rq->cpu_load[] statistics. This function is usually called every
3150 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3151 * every tick. We fix it up based on jiffies.
3152 */
3153static void update_cpu_load(struct rq *this_rq)
3154{
3155        unsigned long this_load = this_rq->load.weight;
3156        unsigned long curr_jiffies = jiffies;
3157        unsigned long pending_updates;
3158        int i, scale;
3159
3160        this_rq->nr_load_updates++;
3161
3162        /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3163        if (curr_jiffies == this_rq->last_load_update_tick)
3164                return;
3165
3166        pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3167        this_rq->last_load_update_tick = curr_jiffies;
3168
3169        /* Update our load: */
3170        this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3171        for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
3172                unsigned long old_load, new_load;
3173
3174                /* scale is effectively 1 << i now, and >> i divides by scale */
3175
3176                old_load = this_rq->cpu_load[i];
3177                old_load = decay_load_missed(old_load, pending_updates - 1, i);
3178                new_load = this_load;
3179                /*
3180                 * Round up the averaging division if load is increasing. This
3181                 * prevents us from getting stuck on 9 if the load is 10, for
3182                 * example.
3183                 */
3184                if (new_load > old_load)
3185                        new_load += scale - 1;
3186
3187                this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3188        }
3189
3190        sched_avg_update(this_rq);
3191}
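
/*
 * Hedged worked example of the round-up above: with i == 1 (scale ==
 * 2), old_load == 9 and this_load == 10, plain averaging gives
 * (9 + 10) >> 1 == 9 forever; bumping new_load to 11 first gives
 * (9 + 11) >> 1 == 10, so the average can actually reach the target.
 */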
3192
3193static void update_cpu_load_active(struct rq *this_rq)
3194{
3195        update_cpu_load(this_rq);
3196
3197        calc_load_account_active(this_rq);
3198}
3199
3200#ifdef CONFIG_SMP
3201
3202/*
3203 * sched_exec - execve() is a valuable balancing opportunity, because at
3204 * this point the task has the smallest effective memory and cache footprint.
3205 */
3206void sched_exec(void)
3207{
3208        struct task_struct *p = current;
3209        unsigned long flags;
3210        struct rq *rq;
3211        int dest_cpu;
3212
3213        rq = task_rq_lock(p, &flags);
3214        dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3215        if (dest_cpu == smp_processor_id())
3216                goto unlock;
3217
3218        /*
3219         * select_task_rq() can race against ->cpus_allowed
3220         */
3221        if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
3222            likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
3223                struct migration_arg arg = { p, dest_cpu };
3224
3225                task_rq_unlock(rq, &flags);
3226                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
3227                return;
3228        }
3229unlock:
3230        task_rq_unlock(rq, &flags);
3231}
3232
3233#endif
3234
3235DEFINE_PER_CPU(struct kernel_stat, kstat);
3236
3237EXPORT_PER_CPU_SYMBOL(kstat);
3238
3239/*
3240 * Return any ns on the sched_clock that have not yet been accounted in
3241 * @p in case that task is currently running.
3242 *
3243 * Called with task_rq_lock() held on @rq.
3244 */
3245static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3246{
3247        u64 ns = 0;
3248
3249        if (task_current(rq, p)) {
3250                update_rq_clock(rq);
3251                ns = rq->clock - p->se.exec_start;
3252                if ((s64)ns < 0)
3253                        ns = 0;
3254        }
3255
3256        return ns;
3257}
3258
3259unsigned long long task_delta_exec(struct task_struct *p)
3260{
3261        unsigned long flags;
3262        struct rq *rq;
3263        u64 ns = 0;
3264
3265        rq = task_rq_lock(p, &flags);
3266        ns = do_task_delta_exec(p, rq);
3267        task_rq_unlock(rq, &flags);
3268
3269        return ns;
3270}
3271
3272/*
3273 * Return accounted runtime for the task.
3274 * In case the task is currently running, return the runtime plus current's
3275 * pending runtime that has not been accounted yet.
3276 */
3277unsigned long long task_sched_runtime(struct task_struct *p)
3278{
3279        unsigned long flags;
3280        struct rq *rq;
3281        u64 ns = 0;
3282
3283        rq = task_rq_lock(p, &flags);
3284        ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3285        task_rq_unlock(rq, &flags);
3286
3287        return ns;
3288}
3289
3290/*
3291 * Return sum_exec_runtime for the thread group.
3292 * In case the task is currently running, return the sum plus current's
3293 * pending runtime that has not been accounted yet.
3294 *
3295 * Note that the thread group might have other running tasks as well,
3296 * so the return value does not include other pending runtime that other
3297 * running tasks might have.
3298 */
3299unsigned long long thread_group_sched_runtime(struct task_struct *p)
3300{
3301        struct task_cputime totals;
3302        unsigned long flags;
3303        struct rq *rq;
3304        u64 ns;
3305
3306        rq = task_rq_lock(p, &flags);
3307        thread_group_cputime(p, &totals);
3308        ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
3309        task_rq_unlock(rq, &flags);
3310
3311        return ns;
3312}
3313
3314/*
3315 * Account user cpu time to a process.
3316 * @p: the process that the cpu time gets accounted to
3317 * @cputime: the cpu time spent in user space since the last update
3318 * @cputime_scaled: cputime scaled by cpu frequency
3319 */
3320void account_user_time(struct task_struct *p, cputime_t cputime,
3321                       cputime_t cputime_scaled)
3322{
3323        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3324        cputime64_t tmp;
3325
3326        /* Add user time to process. */
3327        p->utime = cputime_add(p->utime, cputime);
3328        p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3329        account_group_user_time(p, cputime);
3330
3331        /* Add user time to cpustat. */
3332        tmp = cputime_to_cputime64(cputime);
3333        if (TASK_NICE(p) > 0)
3334                cpustat->nice = cputime64_add(cpustat->nice, tmp);
3335        else
3336                cpustat->user = cputime64_add(cpustat->user, tmp);
3337
3338        cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
3339        /* Account for user time used */
3340        acct_update_integrals(p);
3341}
3342
3343/*
3344 * Account guest cpu time to a process.
3345 * @p: the process that the cpu time gets accounted to
3346 * @cputime: the cpu time spent in virtual machine since the last update
3347 * @cputime_scaled: cputime scaled by cpu frequency
3348 */
3349static void account_guest_time(struct task_struct *p, cputime_t cputime,
3350                               cputime_t cputime_scaled)
3351{
3352        cputime64_t tmp;
3353        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3354
3355        tmp = cputime_to_cputime64(cputime);
3356
3357        /* Add guest time to process. */
3358        p->utime = cputime_add(p->utime, cputime);
3359        p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3360        account_group_user_time(p, cputime);
3361        p->gtime = cputime_add(p->gtime, cputime);
3362
3363        /* Add guest time to cpustat. */
3364        if (TASK_NICE(p) > 0) {
3365                cpustat->nice = cputime64_add(cpustat->nice, tmp);
3366                cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3367        } else {
3368                cpustat->user = cputime64_add(cpustat->user, tmp);
3369                cpustat->guest = cputime64_add(cpustat->guest, tmp);
3370        }
3371}
3372
3373/*
3374 * Account system cpu time to a process.
3375 * @p: the process that the cpu time gets accounted to
3376 * @hardirq_offset: the offset to subtract from hardirq_count()
3377 * @cputime: the cpu time spent in kernel space since the last update
3378 * @cputime_scaled: cputime scaled by cpu frequency
3379 */
3380void account_system_time(struct task_struct *p, int hardirq_offset,
3381                         cputime_t cputime, cputime_t cputime_scaled)
3382{
3383        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3384        cputime64_t tmp;
3385
3386        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
3387                account_guest_time(p, cputime, cputime_scaled);
3388                return;
3389        }
3390
3391        /* Add system time to process. */
3392        p->stime = cputime_add(p->stime, cputime);
3393        p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3394        account_group_system_time(p, cputime);
3395
3396        /* Add system time to cpustat. */
3397        tmp = cputime_to_cputime64(cputime);
3398        if (hardirq_count() - hardirq_offset)
3399                cpustat->irq = cputime64_add(cpustat->irq, tmp);
3400        else if (softirq_count())
3401                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3402        else
3403                cpustat->system = cputime64_add(cpustat->system, tmp);
3404
3405        cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3406
3407        /* Account for system time used */
3408        acct_update_integrals(p);
3409}
3410
3411/*
3412 * Account for involuntary wait time.
3413 * @cputime: the cpu time spent in involuntary wait
3414 */
3415void account_steal_time(cputime_t cputime)
3416{
3417        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3418        cputime64_t cputime64 = cputime_to_cputime64(cputime);
3419
3420        cpustat->steal = cputime64_add(cpustat->steal, cputime64);
3421}
3422
3423/*
3424 * Account for idle time.
3425 * @cputime: the cpu time spent in idle wait
3426 */
3427void account_idle_time(cputime_t cputime)
3428{
3429        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3430        cputime64_t cputime64 = cputime_to_cputime64(cputime);
3431        struct rq *rq = this_rq();
3432
3433        if (atomic_read(&rq->nr_iowait) > 0)
3434                cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3435        else
3436                cpustat->idle = cputime64_add(cpustat->idle, cputime64);
3437}
3438
3439#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3440
3441/*
3442 * Account a single tick of cpu time.
3443 * @p: the process that the cpu time gets accounted to
3444 * @user_tick: indicates if the tick is a user or a system tick
3445 */
3446void account_process_tick(struct task_struct *p, int user_tick)
3447{
3448        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3449        struct rq *rq = this_rq();
3450
3451        if (user_tick)
3452                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3453        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
3454                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
3455                                    one_jiffy_scaled);
3456        else
3457                account_idle_time(cputime_one_jiffy);
3458}
3459
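    /*
     * Illustration (not part of this file): the architecture timer tick
     * lands here via update_process_times(), roughly:
     *
     *      void update_process_times(int user_tick)
     *      {
     *              struct task_struct *p = current;
     *
     *              account_process_tick(p, user_tick);
     *              ...
     *      }
     *
     * so each jiffy is charged to user, system or idle time depending
     * on what the tick interrupted.
     */
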
3460/*
3461 * Account multiple ticks of steal time.
3462 * @ticks: number of stolen ticks, i.e. ticks during which the cpu
3463 *         was taken away from us (involuntary wait)
3464 */
3465void account_steal_ticks(unsigned long ticks)
3466{
3467        account_steal_time(jiffies_to_cputime(ticks));
3468}
3469
3470/*
3471 * Account multiple ticks of idle time.
3472 * @ticks: number of idle ticks
3473 */
3474void account_idle_ticks(unsigned long ticks)
3475{
3476        account_idle_time(jiffies_to_cputime(ticks));
3477}
3478
3479#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
3480
3481/*
3482 * Use precise platform statistics if available:
3483 */
3484#ifdef CONFIG_VIRT_CPU_ACCOUNTING
3485void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3486{
3487        *ut = p->utime;
3488        *st = p->stime;
3489}
3490
3491void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3492{
3493        struct task_cputime cputime;
3494
3495        thread_group_cputime(p, &cputime);
3496
3497        *ut = cputime.utime;
3498        *st = cputime.stime;
3499}
3500#else
3501
3502#ifndef nsecs_to_cputime
3503# define nsecs_to_cputime(__nsecs)      nsecs_to_jiffies(__nsecs)
3504#endif
3505
3506void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3507{
3508        cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
3509
3510        /*
3511         * Use CFS's precise accounting:
3512         */
3513        rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
3514
3515        if (total) {
3516                u64 temp = rtime;
3517
3518                temp *= utime;
3519                do_div(temp, total);
3520                utime = (cputime_t)temp;
3521        } else
3522                utime = rtime;
3523
3524        /*
3525         * Compare with previous values, to keep monotonicity:
3526         */
3527        p->prev_utime = max(p->prev_utime, utime);
3528        p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
3529
3530        *ut = p->prev_utime;
3531        *st = p->prev_stime;
3532}
3533
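    /*
     * Worked example (illustrative numbers): if the sampled values are
     * utime = 3 and stime = 2 (total = 5) but CFS measured rtime = 10,
     * then
     *
     *      utime = 10 * 3 / 5 = 6,  stime = rtime - prev_utime = 4
     *
     * i.e. the precise runtime is split in the sampled user/system
     * ratio, and the prev_* fields keep the reported values monotonic.
     */
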
3534/*
3535 * Must be called with siglock held.
3536 */
3537void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3538{
3539        struct signal_struct *sig = p->signal;
3540        struct task_cputime cputime;
3541        cputime_t rtime, utime, total;
3542
3543        thread_group_cputime(p, &cputime);
3544
3545        total = cputime_add(cputime.utime, cputime.stime);
3546        rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3547
3548        if (total) {
3549                u64 temp = rtime;
3550
3551                temp *= cputime.utime;
3552                do_div(temp, total);
3553                utime = (cputime_t)temp;
3554        } else
3555                utime = rtime;
3556
3557        sig->prev_utime = max(sig->prev_utime, utime);
3558        sig->prev_stime = max(sig->prev_stime,
3559                              cputime_sub(rtime, sig->prev_utime));
3560
3561        *ut = sig->prev_utime;
3562        *st = sig->prev_stime;
3563}
3564#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
3565
3566/*
3567 * This function gets called by the timer code, with HZ frequency.
3568 * We call it with interrupts disabled.
3569 *
3570 * It drives the per-class task_tick() hook and, on SMP, triggers
3571 * the periodic load balancing.
3572 */
3573void scheduler_tick(void)
3574{
3575        int cpu = smp_processor_id();
3576        struct rq *rq = cpu_rq(cpu);
3577        struct task_struct *curr = rq->curr;
3578
3579        sched_clock_tick();
3580
3581        raw_spin_lock(&rq->lock);
3582        update_rq_clock(rq);
3583        update_cpu_load_active(rq);
3584        curr->sched_class->task_tick(rq, curr, 0);
3585        raw_spin_unlock(&rq->lock);
3586
3587        perf_event_task_tick(curr);
3588
3589#ifdef CONFIG_SMP
3590        rq->idle_at_tick = idle_cpu(cpu);
3591        trigger_load_balance(rq, cpu);
3592#endif
3593}
3594
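    /*
     * Return the caller address for the preempt tracer, skipping over
     * the locking primitives: if @addr lies inside a lock function,
     * step one or two frames further up to reach the real call site.
     */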
3595notrace unsigned long get_parent_ip(unsigned long addr)
3596{
3597        if (in_lock_functions(addr)) {
3598                addr = CALLER_ADDR2;
3599                if (in_lock_functions(addr))
3600                        addr = CALLER_ADDR3;
3601        }
3602        return addr;
3603}
3604
3605#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3606                                defined(CONFIG_PREEMPT_TRACER))
3607
3608void __kprobes add_preempt_count(int val)
3609{
3610#ifdef CONFIG_DEBUG_PREEMPT
3611        /*
3612         * Underflow?
3613         */
3614        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3615                return;
3616#endif
3617        preempt_count() += val;
3618#ifdef CONFIG_DEBUG_PREEMPT
3619        /*
3620         * Spinlock count overflowing soon?
3621         */
3622        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3623                                PREEMPT_MASK - 10);
3624#endif
3625        if (preempt_count() == val)
3626                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3627}
3628EXPORT_SYMBOL(add_preempt_count);
3629
3630void __kprobes sub_preempt_count(int val)
3631{
3632#ifdef CONFIG_DEBUG_PREEMPT
3633        /*
3634         * Underflow?
3635         */
3636        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3637                return;
3638        /*
3639         * Is the spinlock portion underflowing?
3640         */
3641        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3642                        !(preempt_count() & PREEMPT_MASK)))
3643                return;
3644#endif
3645
3646        if (preempt_count() == val)
3647                trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3648        preempt_count() -= val;
3649}
3650EXPORT_SYMBOL(sub_preempt_count);
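
    /*
     * Sketch (assuming CONFIG_DEBUG_PREEMPT): preempt_disable() and
     * preempt_enable() funnel into the two functions above:
     *
     *      preempt_disable();      // add_preempt_count(1)
     *      ...critical section...
     *      preempt_enable();       // sub_preempt_count(1) + resched check
     *
     * The preempt_count() == val tests make the tracepoints fire only
     * on the outermost disable/enable pair.
     */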
3651
3652#endif
3653
3654/*
3655 * Print scheduling while atomic bug:
3656 */
3657static noinline void __schedule_bug(struct task_struct *prev)
3658{
3659        struct pt_regs *regs = get_irq_regs();
3660
3661        printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3662                prev->comm, prev->pid, preempt_count());
3663
3664        debug_show_held_locks(prev);
3665        print_modules();
3666        if (irqs_disabled())
3667                print_irqtrace_events(prev);
3668
3669        if (regs)
3670                show_regs(regs);
3671        else
3672                dump_stack();
3673}
3674
3675/*
3676 * Various schedule()-time debugging checks and statistics:
3677 */
3678static inline void schedule_debug(struct task_struct *prev)
3679{
3680        /*
3681         * Test if we are atomic. Since do_exit() needs to call into
3682         * schedule() atomically, we ignore that path for now.
3683         * Otherwise, whine if we are scheduling when we should not be.
3684         */
3685        if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
3686                __schedule_bug(prev);
3687
3688        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3689
3690        schedstat_inc(this_rq(), sched_count);
3691#ifdef CONFIG_SCHEDSTATS
3692        if (unlikely(prev->lock_depth >= 0)) {
3693                schedstat_inc(this_rq(), bkl_count);
3694                schedstat_inc(prev, sched_info.bkl_count);
3695        }
3696#endif
3697}
3698
3699static void put_prev_task(struct rq *rq, struct task_struct *prev)
3700{
3701        if (prev->se.on_rq)
3702                update_rq_clock(rq);
3703        rq->skip_clock_update = 0;
3704        prev->sched_class->put_prev_task(rq, prev);
3705}
3706
3707/*
3708 * Pick up the highest-prio task:
3709 */
3710static inline struct task_struct *
3711pick_next_task(struct rq *rq)
3712{
3713        const struct sched_class *class;
3714        struct task_struct *p;
3715
3716        /*
3717         * Optimization: we know that if all tasks are in
3718         * the fair class we can call that function directly:
3719         */
3720        if (likely(rq->nr_running == rq->cfs.nr_running)) {
3721                p = fair_sched_class.pick_next_task(rq);
3722                if (likely(p))
3723                        return p;
3724        }
3725
3726        class = sched_class_highest;
3727        for ( ; ; ) {
3728                p = class->pick_next_task(rq);
3729                if (p)
3730                        return p;
3731                /*
3732                 * Will never be NULL as the idle class always
3733                 * returns a non-NULL p:
3734                 */
3735                class = class->next;
3736        }
3737}
3738
3739/*
3740 * schedule() is the main scheduler function.
3741 */
3742asmlinkage void __sched schedule(void)
3743{
3744        struct task_struct *prev, *next;
3745        unsigned long *switch_count;
3746        struct rq *rq;
3747        int cpu;
3748
3749need_resched:
3750        preempt_disable();
3751        cpu = smp_processor_id();
3752        rq = cpu_rq(cpu);
3753        rcu_note_context_switch(cpu);
3754        prev = rq->curr;
3755
3756        release_kernel_lock(prev);
3757need_resched_nonpreemptible:
3758
3759        schedule_debug(prev);
3760
3761        if (sched_feat(HRTICK))
3762                hrtick_clear(rq);
3763
3764        raw_spin_lock_irq(&rq->lock);
3765        clear_tsk_need_resched(prev);
3766
3767        switch_count = &prev->nivcsw;
3768        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3769                if (unlikely(signal_pending_state(prev->state, prev))) {
3770                        prev->state = TASK_RUNNING;
3771                } else {
3772                        /*
3773                         * If a worker is going to sleep, notify and
3774                         * ask workqueue whether it wants to wake up a
3775                         * task to maintain concurrency.  If so, wake
3776                         * up the task.
3777                         */
3778                        if (prev->flags & PF_WQ_WORKER) {
3779                                struct task_struct *to_wakeup;
3780
3781                                to_wakeup = wq_worker_sleeping(prev, cpu);
3782                                if (to_wakeup)
3783                                        try_to_wake_up_local(to_wakeup);
3784                        }
3785                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
3786                }
3787                switch_count = &prev->nvcsw;
3788        }
3789
3790        pre_schedule(rq, prev);
3791
3792        if (unlikely(!rq->nr_running))
3793                idle_balance(cpu, rq);
3794
3795        put_prev_task(rq, prev);
3796        next = pick_next_task(rq);
3797
3798        if (likely(prev != next)) {
3799                sched_info_switch(prev, next);
3800                perf_event_task_sched_out(prev, next);
3801
3802                rq->nr_switches++;
3803                rq->curr = next;
3804                ++*switch_count;
3805
3806                context_switch(rq, prev, next); /* unlocks the rq */
3807                /*
3808                 * The context switch has flipped the stack from under us
3809                 * and restored the local variables that were saved when
3810                 * this task called schedule() in the past. prev == current
3811                 * is still correct, but it may have moved to another cpu/rq.
3812                 */
3813                cpu = smp_processor_id();
3814                rq = cpu_rq(cpu);
3815        } else
3816                raw_spin_unlock_irq(&rq->lock);
3817
3818        post_schedule(rq);
3819
3820        if (unlikely(reacquire_kernel_lock(prev)))
3821                goto need_resched_nonpreemptible;
3822
3823        preempt_enable_no_resched();
3824        if (need_resched())
3825                goto need_resched;
3826}
3827EXPORT_SYMBOL(schedule);
3828
3829#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
3830/*
3831 * Look out! "owner" is an entirely speculative pointer
3832 * access and not reliable.
3833 */
3834int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3835{
3836        unsigned int cpu;
3837        struct rq *rq;
3838
3839        if (!sched_feat(OWNER_SPIN))
3840                return 0;
3841
3842#ifdef CONFIG_DEBUG_PAGEALLOC
3843        /*
3844         * Need to access the cpu field knowing that
3845         * DEBUG_PAGEALLOC could have unmapped it if
3846         * the mutex owner just released it and exited.
3847         */
3848        if (probe_kernel_address(&owner->cpu, cpu))
3849                return 0;
3850#else
3851        cpu = owner->cpu;
3852#endif
3853
3854        /*
3855         * Even if the access succeeded (likely case),
3856         * the cpu field may no longer be valid.
3857         */
3858        if (cpu >= nr_cpumask_bits)
3859                return 0;
3860
3861        /*
3862         * We need to validate that we can do a
3863         * get_cpu() and that we have the percpu area.
3864         */
3865        if (!cpu_online(cpu))
3866                return 0;
3867
3868        rq = cpu_rq(cpu);
3869
3870        for (;;) {
3871                /*
3872                 * Owner changed, break to re-assess state.
3873                 */
3874                if (lock->owner != owner) {
3875                        /*
3876                         * If the lock has switched to a different owner,
3877                         * we likely have heavy contention. Return 0 to quit
3878                         * optimistic spinning and not contend further:
3879                         */
3880                        if (lock->owner)
3881                                return 0;
3882                        break;
3883                }
3884
3885                /*
3886                 * Is that owner really running on that cpu?
3887                 */
3888                if (task_thread_info(rq->curr) != owner || need_resched())
3889                        return 0;
3890
3891                cpu_relax();
3892        }
3893
3894        return 1;
3895}
3896#endif /* CONFIG_MUTEX_SPIN_ON_OWNER */
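
    /*
     * Illustration (the slowpath lives in kernel/mutex.c): the
     * optimistic spin loop there looks roughly like
     *
     *      owner = ACCESS_ONCE(lock->owner);
     *      if (owner && !mutex_spin_on_owner(lock, owner))
     *              break;          // owner went to sleep: stop spinning
     *
     * so a contender keeps spinning only while the lock owner is
     * actually running on its CPU.
     */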
3897
3898#ifdef CONFIG_PREEMPT
3899/*
3900 * This is the entry point to schedule() from in-kernel preemption
3901 * off of preempt_enable. Kernel preemption off of a return from
3902 * interrupt is handled separately by preempt_schedule_irq() below.
3903 */
3904asmlinkage void __sched notrace preempt_schedule(void)
3905{
3906        struct thread_info *ti = current_thread_info();
3907
3908        /*
3909         * If there is a non-zero preempt_count or interrupts are disabled,
3910         * we do not want to preempt the current task. Just return.
3911         */
3912        if (likely(ti->preempt_count || irqs_disabled()))
3913                return;
3914
3915        do {
3916                add_preempt_count_notrace(PREEMPT_ACTIVE);
3917                schedule();
3918                sub_preempt_count_notrace(PREEMPT_ACTIVE);
3919
3920                /*
3921                 * Check again in case we missed a preemption opportunity
3922                 * between schedule and now.
3923                 */
3924                barrier();
3925        } while (need_resched());
3926}
3927EXPORT_SYMBOL(preempt_schedule);
3928
3929/*
3930 * This is the entry point to schedule() from kernel preemption
3931 * off of irq context.
3932 * Note that this is called and returns with irqs disabled. This
3933 * protects us against recursive calls from irq context.
3934 */
3935asmlinkage void __sched preempt_schedule_irq(void)
3936{
3937        struct thread_info *ti = current_thread_info();
3938
3939        /* Catch callers which need to be fixed */
3940        BUG_ON(ti->preempt_count || !irqs_disabled());
3941
3942        do {
3943                add_preempt_count(PREEMPT_ACTIVE);
3944                local_irq_enable();
3945                schedule();
3946                local_irq_disable();
3947                sub_preempt_count(PREEMPT_ACTIVE);
3948
3949                /*
3950                 * Check again in case we missed a preemption opportunity
3951                 * between schedule and now.
3952                 */
3953                barrier();
3954        } while (need_resched());
3955}
3956
3957#endif /* CONFIG_PREEMPT */
3958
3959int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3960                          void *key)
3961{
3962        return try_to_wake_up(curr->private, mode, wake_flags);
3963}
3964EXPORT_SYMBOL(default_wake_function);
3965
3966/*
3967 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3968 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3969 * number) then we wake all the non-exclusive tasks and one exclusive task.
3970 *
3971 * There are circumstances in which we can try to wake a task which has already
3972 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
3973 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3974 */
3975static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3976                        int nr_exclusive, int wake_flags, void *key)
3977{
3978        wait_queue_t *curr, *next;
3979
3980        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
3981                unsigned flags = curr->flags;
3982
3983                if (curr->func(curr, mode, wake_flags, key) &&
3984                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3985                        break;
3986        }
3987}
3988
3989/**
3990 * __wake_up - wake up threads blocked on a waitqueue.
3991 * @q: the waitqueue
3992 * @mode: which threads
3993 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3994 * @key: is directly passed to the wakeup function
3995 *
3996 * It may be assumed that this function implies a write memory barrier before
3997 * changing the task state if and only if any tasks are woken up.
3998 */
3999void __wake_up(wait_queue_head_t *q, unsigned int mode,
4000                        int nr_exclusive, void *key)
4001{
4002        unsigned long flags;
4003
4004        spin_lock_irqsave(&q->lock, flags);
4005        __wake_up_common(q, mode, nr_exclusive, 0, key);
4006        spin_unlock_irqrestore(&q->lock, flags);
4007}
4008EXPORT_SYMBOL(__wake_up);
4009
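    /*
     * Sketch: the common wake_up*() macros in <linux/wait.h> are thin
     * wrappers around this function, e.g.
     *
     *      #define wake_up(x)      __wake_up(x, TASK_NORMAL, 1, NULL)
     *      #define wake_up_all(x)  __wake_up(x, TASK_NORMAL, 0, NULL)
     *
     * i.e. wake_up() wakes all non-exclusive waiters plus at most one
     * exclusive waiter, while wake_up_all() wakes everybody.
     */
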
4010/*
4011 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4012 */
4013void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4014{
4015        __wake_up_common(q, mode, 1, 0, NULL);
4016}
4017EXPORT_SYMBOL_GPL(__wake_up_locked);
4018
4019void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4020{
4021        __wake_up_common(q, mode, 1, 0, key);
4022}
4023
4024/**
4025 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
4026 * @q: the waitqueue
4027 * @mode: which threads
4028 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4029 * @key: opaque value to be passed to wakeup targets
4030 *
4031 * The sync wakeup differs in that the waker knows that it will schedule
4032 * away soon, so while the target thread will be woken up, it will not
4033 * be migrated to another CPU - ie. the two threads are 'synchronized'
4034 * with each other. This can prevent needless bouncing between CPUs.
4035 *
4036 * On UP it can prevent extra preemption.
4037 *
4038 * It may be assumed that this function implies a write memory barrier before
4039 * changing the task state if and only if any tasks are woken up.
4040 */
4041void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4042                        int nr_exclusive, void *key)
4043{
4044        unsigned long flags;
4045        int wake_flags = WF_SYNC;
4046
4047        if (unlikely(!q))
4048                return;
4049
4050        if (unlikely(!nr_exclusive))
4051                wake_flags = 0;
4052
4053        spin_lock_irqsave(&q->lock, flags);
4054        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
4055        spin_unlock_irqrestore(&q->lock, flags);
4056}
4057EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4058
4059/*
4060 * __wake_up_sync - see __wake_up_sync_key()
4061 */
4062void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4063{
4064        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4065}
4066EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
4067
4068/**
4069 * complete: - signals a single thread waiting on this completion
4070 * @x:  holds the state of this particular completion
4071 *
4072 * This will wake up a single thread waiting on this completion. Threads will be
4073 * awakened in the same order in which they were queued.
4074 *
4075 * See also complete_all(), wait_for_completion() and related routines.
4076 *
4077 * It may be assumed that this function implies a write memory barrier before
4078 * changing the task state if and only if any tasks are woken up.
4079 */
4080void complete(struct completion *x)
4081{
4082        unsigned long flags;
4083
4084        spin_lock_irqsave(&x->wait.lock, flags);
4085        x->done++;
4086        __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
4087        spin_unlock_irqrestore(&x->wait.lock, flags);
4088}
4089EXPORT_SYMBOL(complete);
4090
4091/**
4092 * complete_all: - signals all threads waiting on this completion
4093 * @x:  holds the state of this particular completion
4094 *
4095 * This will wake up all threads waiting on this particular completion event.
4096 *
4097 * It may be assumed that this function implies a write memory barrier before
4098 * changing the task state if and only if any tasks are woken up.
4099 */
4100void complete_all(struct completion *x)
4101{
4102        unsigned long flags;
4103
4104        spin_lock_irqsave(&x->wait.lock, flags);
4105        x->done += UINT_MAX/2;
4106        __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
4107        spin_unlock_irqrestore(&x->wait.lock, flags);
4108}
4109EXPORT_SYMBOL(complete_all);
4110
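    /*
     * Typical pairing (illustration only, 'done' being the caller's
     * completion):
     *
     *      DECLARE_COMPLETION_ONSTACK(done);
     *
     *      ...hand &done to another context, then:
     *      wait_for_completion(&done);     // sleeps until complete(&done)
     *
     * complete_all() adds UINT_MAX/2 so every present and future waiter
     * sees ->done nonzero without realistically overflowing the counter.
     */
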
4111static inline long __sched
4112do_wait_for_common(struct completion *x, long timeout, int state)
4113{
4114        if (!x->done) {
4115                DECLARE_WAITQUEUE(wait, current);
4116
4117                __add_wait_queue_tail_exclusive(&x->wait, &wait);
4118                do {
4119                        if (signal_pending_state(state, current)) {
4120                                timeout = -ERESTARTSYS;
4121                                break;
4122                        }
4123                        __set_current_state(state);
4124                        spin_unlock_irq(&x->wait.lock);
4125                        timeout = schedule_timeout(timeout);
4126                        spin_lock_irq(&x->wait.lock);
4127                } while (!x->done && timeout);
4128                __remove_wait_queue(&x->wait, &wait);
4129                if (!x->done)
4130                        return timeout;
4131        }
4132        x->done--;
4133        return timeout ?: 1;
4134}
4135
4136static long __sched
4137wait_for_common(struct completion *x, long timeout, int state)
4138{
4139        might_sleep();
4140
4141        spin_lock_irq(&x->wait.lock);
4142        timeout = do_wait_for_common(x, timeout, state);
4143        spin_unlock_irq(&x->wait.lock);
4144        return timeout;
4145}
4146
4147/**
4148 * wait_for_completion: - waits for completion of a task
4149 * @x:  holds the state of this particular completion
4150 *
4151 * This waits to be signaled for completion of a specific task. It is NOT
4152 * interruptible and there is no timeout.
4153 *
4154 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4155 * and interrupt capability. Also see complete().
4156 */
4157void __sched wait_for_completion(struct completion *x)
4158{
4159        wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4160}
4161EXPORT_SYMBOL(wait_for_completion);
4162
4163/**
4164 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4165 * @x:  holds the state of this particular completion
4166 * @timeout:  timeout value in jiffies
4167 *
4168 * This waits for either a completion of a specific task to be signaled or for a
4169 * specified timeout to expire. The timeout is in jiffies. It is not
4170 * interruptible.
4171 */
4172unsigned long __sched
4173wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4174{
4175        return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
4176}
4177EXPORT_SYMBOL(wait_for_completion_timeout);
4178
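    /*
     * Usage sketch ('done' is the caller's completion): the return
     * value is 0 on timeout and the remaining jiffies (at least 1) on
     * completion, so callers typically do:
     *
     *      if (!wait_for_completion_timeout(&done, msecs_to_jiffies(500)))
     *              return -ETIMEDOUT;
     */
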
4179/**
4180 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4181 * @x:  holds the state of this particular completion
4182 *
4183 * This waits for completion of a specific task to be signaled. It is
4184 * interruptible.
4185 */
4186int __sched wait_for_completion_interruptible(struct completion *x)
4187{
4188        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4189        if (t == -ERESTARTSYS)
4190                return t;
4191        return 0;
4192}
4193EXPORT_SYMBOL(wait_for_completion_interruptible);
4194
4195/**
4196 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4197 * @x:  holds the state of this particular completion
4198 * @timeout:  timeout value in jiffies
4199 *
4200 * This waits for either a completion of a specific task to be signaled or for a
4201 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4202 */
4203unsigned long __sched
4204wait_for_completion_interruptible_timeout(struct completion *x,
4205                                          unsigned long timeout)
4206{
4207        return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
4208}
4209EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4210
4211/**
4212 * wait_for_completion_killable: - waits for completion of a task (killable)
4213 * @x:  holds the state of this particular completion
4214 *
4215 * This waits to be signaled for completion of a specific task. It can be
4216 * interrupted by a kill signal.
4217 */
4218int __sched wait_for_completion_killable(struct completion *x)
4219{
4220        long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4221        if (t == -ERESTARTSYS)
4222                return t;
4223        return 0;
4224}
4225EXPORT_SYMBOL(wait_for_completion_killable);
4226
4227/**
4228 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4229 * @x:  holds the state of this particular completion
4230 * @timeout:  timeout value in jiffies
4231 *
4232 * This waits for either a completion of a specific task to be
4233 * signaled or for a specified timeout to expire. It can be
4234 * interrupted by a kill signal. The timeout is in jiffies.
4235 */
4236unsigned long __sched
4237wait_for_completion_killable_timeout(struct completion *x,
4238                                     unsigned long timeout)
4239{
4240        return wait_for_common(x, timeout, TASK_KILLABLE);
4241}
4242EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4243
4244/**
4245 *      try_wait_for_completion - try to decrement a completion without blocking
4246 *      @x:     completion structure
4247 *
4248 *      Returns: 0 if a decrement cannot be done without blocking
4249 *               1 if a decrement succeeded.
4250 *
4251 *      If a completion is being used as a counting completion,
4252 *      attempt to decrement the counter without blocking. This
4253 *      enables us to avoid waiting if the resource the completion
4254 *      is protecting is not available.
4255 */
4256bool try_wait_for_completion(struct completion *x)
4257{
4258        unsigned long flags;
4259        int ret = 1;
4260
4261        spin_lock_irqsave(&x->wait.lock, flags);
4262        if (!x->done)
4263                ret = 0;
4264        else
4265                x->done--;
4266        spin_unlock_irqrestore(&x->wait.lock, flags);
4267        return ret;
4268}
4269EXPORT_SYMBOL(try_wait_for_completion);
4270
4271/**
4272 *      completion_done - Test to see if a completion has any waiters
4273 *      @x:     completion structure
4274 *
4275 *      Returns: 0 if there are waiters (wait_for_completion() in progress)
4276 *               1 if there are no waiters.
4277 *
4278 */
4279bool completion_done(struct completion *x)
4280{
4281        unsigned long flags;
4282        int ret = 1;
4283
4284        spin_lock_irqsave(&x->wait.lock, flags);
4285        if (!x->done)
4286                ret = 0;
4287        spin_unlock_irqrestore(&x->wait.lock, flags);
4288        return ret;
4289}
4290EXPORT_SYMBOL(completion_done);
4291
4292static long __sched
4293sleep_on_common(wait_queue_head_t *q, int state, long timeout)
4294{
4295        unsigned long flags;
4296        wait_queue_t wait;
4297
4298        init_waitqueue_entry(&wait, current);
4299
4300        __set_current_state(state);
4301
4302        spin_lock_irqsave(&q->lock, flags);
4303        __add_wait_queue(q, &wait);
4304        spin_unlock(&q->lock);
4305        timeout = schedule_timeout(timeout);
4306        spin_lock_irq(&q->lock);
4307        __remove_wait_queue(q, &wait);
4308        spin_unlock_irqrestore(&q->lock, flags);
4309
4310        return timeout;
4311}
4312
4313void __sched interruptible_sleep_on(wait_queue_head_t *q)
4314{
4315        sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4316}
4317EXPORT_SYMBOL(interruptible_sleep_on);
4318
4319long __sched
4320interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
4321{
4322        return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
4323}
4324EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4325
4326void __sched sleep_on(wait_queue_head_t *q)
4327{
4328        sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4329}
4330EXPORT_SYMBOL(sleep_on);
4331
4332long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
4333{
4334        return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
4335}
4336EXPORT_SYMBOL(sleep_on_timeout);
4337
4338#ifdef CONFIG_RT_MUTEXES
4339
4340/*
4341 * rt_mutex_setprio - set the current priority of a task
4342 * @p: task
4343 * @prio: prio value (kernel-internal form)
4344 *
4345 * This function changes the 'effective' priority of a task. It does
4346 * not touch ->normal_prio like __setscheduler().
4347 *
4348 * Used by the rt_mutex code to implement priority inheritance logic.
4349 */
4350void rt_mutex_setprio(struct task_struct *p, int prio)
4351{
4352        unsigned long flags;
4353        int oldprio, on_rq, running;
4354        struct rq *rq;
4355        const struct sched_class *prev_class;
4356
4357        BUG_ON(prio < 0 || prio > MAX_PRIO);
4358
4359        rq = task_rq_lock(p, &flags);
4360
4361        oldprio = p->prio;
4362        prev_class = p->sched_class;
4363        on_rq = p->se.on_rq;
4364        running = task_current(rq, p);
4365        if (on_rq)
4366                dequeue_task(rq, p, 0);
4367        if (running)
4368                p->sched_class->put_prev_task(rq, p);
4369
4370        if (rt_prio(prio))
4371                p->sched_class = &rt_sched_class;
4372        else
4373                p->sched_class = &fair_sched_class;
4374
4375        p->prio = prio;
4376
4377        if (running)
4378                p->sched_class->set_curr_task(rq);
4379        if (on_rq) {
4380                enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4381
4382                check_class_changed(rq, p, prev_class, oldprio, running);
4383        }
4384        task_rq_unlock(rq, &flags);
4385}
4386
4387#endif /* CONFIG_RT_MUTEXES */
4388
4389void set_user_nice(struct task_struct *p, long nice)
4390{
4391        int old_prio, delta, on_rq;
4392        unsigned long flags;
4393        struct rq *rq;
4394
4395        if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4396                return;
4397        /*
4398         * We have to be careful: if called from sys_setpriority(),
4399         * the task might be in the middle of scheduling on another CPU.
4400         */
4401        rq = task_rq_lock(p, &flags);
4402        /*
4403         * The RT priorities are set via sched_setscheduler(), but we still
4404         * allow the 'normal' nice value to be set - but as expected
4405         * it won't have any effect on scheduling while the task's
4406         * policy remains SCHED_FIFO/SCHED_RR:
4407         */
4408        if (task_has_rt_policy(p)) {
4409                p->static_prio = NICE_TO_PRIO(nice);
4410                goto out_unlock;
4411        }
4412        on_rq = p->se.on_rq;
4413        if (on_rq)
4414                dequeue_task(rq, p, 0);
4415
4416        p->static_prio = NICE_TO_PRIO(nice);
4417        set_load_weight(p);
4418        old_prio = p->prio;
4419        p->prio = effective_prio(p);
4420        delta = p->prio - old_prio;
4421
4422        if (on_rq) {
4423                enqueue_task(rq, p, 0);
4424                /*
4425                 * If the task increased its priority or is running and
4426                 * lowered its priority, then reschedule its CPU:
4427                 */
4428                if (delta < 0 || (delta > 0 && task_running(rq, p)))
4429                        resched_task(rq->curr);
4430        }
4431out_unlock:
4432        task_rq_unlock(rq, &flags);
4433}
4434EXPORT_SYMBOL(set_user_nice);
4435
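    /*
     * Illustration: kernel threads may demote themselves directly, e.g.
     *
     *      set_user_nice(current, 19);
     *
     * which takes effect immediately: the load weight is recomputed and
     * the runqueue's current task is rescheduled if needed.
     */
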
4436/*
4437 * can_nice - check if a task can reduce its nice value
4438 * @p: task
4439 * @nice: nice value
4440 */
4441int can_nice(const struct task_struct *p, const int nice)
4442{
4443        /* convert nice value [19,-20] to rlimit style value [1,40] */
4444        int nice_rlim = 20 - nice;
4445
4446        return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
4447                capable(CAP_SYS_NICE));
4448}
4449
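    /*
     * Worked example: nice = -10 gives nice_rlim = 20 - (-10) = 30, so
     * lowering a task to nice -10 needs RLIMIT_NICE >= 30 or
     * CAP_SYS_NICE.
     */
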
4450#ifdef __ARCH_WANT_SYS_NICE
4451
4452/*
4453 * sys_nice - change the priority of the current process.
4454 * @increment: priority increment
4455 *
4456 * sys_setpriority is a more generic, but much slower function that
4457 * does similar things.
4458 */
4459SYSCALL_DEFINE1(nice, int, increment)
4460{
4461        long nice, retval;
4462
4463        /*
4464         * Setpriority might change our priority at the same moment.
4465         * We don't have to worry. Conceptually one call occurs first
4466         * and we have a single winner.
4467         */
4468        if (increment < -40)
4469                increment = -40;
4470        if (increment > 40)
4471                increment = 40;
4472
4473        nice = TASK_NICE(current) + increment;
4474        if (nice < -20)
4475                nice = -20;
4476        if (nice > 19)
4477                nice = 19;
4478
4479        if (increment < 0 && !can_nice(current, nice))
4480                return -EPERM;
4481
4482        retval = security_task_setnice(current, nice);
4483        if (retval)
4484                return retval;
4485
4486        set_user_nice(current, nice);
4487        return 0;
4488}
4489
4490#endif /* __ARCH_WANT_SYS_NICE */
4491
4492/**
4493 * task_prio - return the priority value of a given task.
4494 * @p: the task in question.
4495 *
4496 * This is the priority value as seen by users in /proc.
4497 * RT tasks show up as negative values (offset by -MAX_RT_PRIO,
4498 * i.e. -100..-1). Normal tasks map onto 0..39, mirroring nice -20..19.
4499 */
4500int task_prio(const struct task_struct *p)
4501{
4502        return p->prio - MAX_RT_PRIO;
4503}
4504
4505/**
4506 * task_nice - return the nice value of a given task.
4507 * @p: the task in question.
4508 */
4509int task_nice(const struct task_struct *p)
4510{
4511        return TASK_NICE(p);
4512}
4513EXPORT_SYMBOL(task_nice);
4514
4515/**
4516 * idle_cpu - is a given cpu idle currently?
4517 * @cpu: the processor in question.
4518 */
4519int idle_cpu(int cpu)
4520{
4521        return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4522}
4523
4524/**
4525 * idle_task - return the idle task for a given cpu.
4526 * @cpu: the processor in question.
4527 */
4528struct task_struct *idle_task(int cpu)
4529{
4530        return cpu_rq(cpu)->idle;
4531}
4532
4533/**
4534 * find_process_by_pid - find a process with a matching PID value.
4535 * @pid: the pid in question.
4536 */
4537static struct task_struct *find_process_by_pid(pid_t pid)
4538{
4539        return pid ? find_task_by_vpid(pid) : current;
4540}
4541
4542/* Actually do priority change: must hold rq lock. */
4543static void
4544__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4545{
4546        BUG_ON(p->se.on_rq);
4547
4548        p->policy = policy;
4549        p->rt_priority = prio;
4550        p->normal_prio = normal_prio(p);
4551        /* we are holding p->pi_lock already */
4552        p->prio = rt_mutex_getprio(p);
4553        if (rt_prio(p->prio))
4554                p->sched_class = &rt_sched_class;
4555        else
4556                p->sched_class = &fair_sched_class;
4557        set_load_weight(p);
4558}
4559
4560/*
4561 * Check whether the target process has a UID that matches the current process's
4562 */
4563static bool check_same_owner(struct task_struct *p)
4564{
4565        const struct cred *cred = current_cred(), *pcred;
4566        bool match;
4567
4568        rcu_read_lock();
4569        pcred = __task_cred(p);
4570        match = (cred->euid == pcred->euid ||
4571                 cred->euid == pcred->uid);
4572        rcu_read_unlock();
4573        return match;
4574}
4575
4576static int __sched_setscheduler(struct task_struct *p, int policy,
4577                                struct sched_param *param, bool user)
4578{
4579        int retval, oldprio, oldpolicy = -1, on_rq, running;
4580        unsigned long flags;
4581        const struct sched_class *prev_class;
4582        struct rq *rq;
4583        int reset_on_fork;
4584
4585        /* may grab non-irq protected spin_locks */
4586        BUG_ON(in_interrupt());
4587recheck:
4588        /* double check policy once rq lock held */
4589        if (policy < 0) {
4590                reset_on_fork = p->sched_reset_on_fork;
4591                policy = oldpolicy = p->policy;
4592        } else {
4593                reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4594                policy &= ~SCHED_RESET_ON_FORK;
4595
4596                if (policy != SCHED_FIFO && policy != SCHED_RR &&
4597                                policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4598                                policy != SCHED_IDLE)
4599                        return -EINVAL;
4600        }
4601
4602        /*
4603         * Valid priorities for SCHED_FIFO and SCHED_RR are
4604         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4605         * SCHED_BATCH and SCHED_IDLE is 0.
4606         */
4607        if (param->sched_priority < 0 ||
4608            (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4609            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4610                return -EINVAL;
4611        if (rt_policy(policy) != (param->sched_priority != 0))
4612                return -EINVAL;
4613
4614        /*
4615         * Allow unprivileged RT tasks to decrease priority:
4616         */
4617        if (user && !capable(CAP_SYS_NICE)) {
4618                if (rt_policy(policy)) {
4619                        unsigned long rlim_rtprio =
4620                                        task_rlimit(p, RLIMIT_RTPRIO);
4621
4622                        /* can't set/change the rt policy */
4623                        if (policy != p->policy && !rlim_rtprio)
4624                                return -EPERM;
4625
4626                        /* can't increase priority */
4627                        if (param->sched_priority > p->rt_priority &&
4628                            param->sched_priority > rlim_rtprio)
4629                                return -EPERM;
4630                }
4631                /*
4632                 * Like positive nice levels, don't allow tasks to
4633                 * move out of SCHED_IDLE either:
4634                 */
4635                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4636                        return -EPERM;
4637
4638                /* can't change other user's priorities */
4639                if (!check_same_owner(p))
4640                        return -EPERM;
4641
4642                /* Normal users shall not reset the sched_reset_on_fork flag */
4643                if (p->sched_reset_on_fork && !reset_on_fork)
4644                        return -EPERM;
4645        }
4646
4647        if (user) {
4648                retval = security_task_setscheduler(p, policy, param);
4649                if (retval)
4650                        return retval;
4651        }
4652
4653        /*
4654         * make sure no PI-waiters arrive (or leave) while we are
4655         * changing the priority of the task:
4656         */
4657        raw_spin_lock_irqsave(&p->pi_lock, flags);
4658        /*
4659         * To be able to change p->policy safely, the appropriate
4660         * runqueue lock must be held.
4661         */
4662        rq = __task_rq_lock(p);
4663
4664#ifdef CONFIG_RT_GROUP_SCHED
4665        if (user) {
4666                /*
4667                 * Do not allow realtime tasks into groups that have no runtime
4668                 * assigned.
4669                 */
4670                if (rt_bandwidth_enabled() && rt_policy(policy) &&
4671                                task_group(p)->rt_bandwidth.rt_runtime == 0) {
4672                        __task_rq_unlock(rq);
4673                        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4674                        return -EPERM;
4675                }
4676        }
4677#endif
4678
4679        /* recheck policy now with rq lock held */
4680        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4681                policy = oldpolicy = -1;
4682                __task_rq_unlock(rq);
4683                raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4684                goto recheck;
4685        }
4686        on_rq = p->se.on_rq;
4687        running = task_current(rq, p);
4688        if (on_rq)
4689                deactivate_task(rq, p, 0);
4690        if (running)
4691                p->sched_class->put_prev_task(rq, p);
4692
4693        p->sched_reset_on_fork = reset_on_fork;
4694
4695        oldprio = p->prio;
4696        prev_class = p->sched_class;
4697        __setscheduler(rq, p, policy, param->sched_priority);
4698
4699        if (running)
4700                p->sched_class->set_curr_task(rq);
4701        if (on_rq) {
4702                activate_task(rq, p, 0);
4703
4704                check_class_changed(rq, p, prev_class, oldprio, running);
4705        }
4706        __task_rq_unlock(rq);
4707        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4708
4709        rt_mutex_adjust_pi(p);
4710
4711        return 0;
4712}
4713
4714/**
4715 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4716 * @p: the task in question.
4717 * @policy: new policy.
4718 * @param: structure containing the new RT priority.
4719 *
4720 * NOTE that the task may already be dead.
4721 */
4722int sched_setscheduler(struct task_struct *p, int policy,
4723                       struct sched_param *param)
4724{
4725        return __sched_setscheduler(p, policy, param, true);
4726}
4727EXPORT_SYMBOL_GPL(sched_setscheduler);
4728
4729/**
4730 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4731 * @p: the task in question.
4732 * @policy: new policy.
4733 * @param: structure containing the new RT priority.
4734 *
4735 * Just like sched_setscheduler, only don't bother checking if the
4736 * current context has permission.  For example, this is needed in
4737 * stop_machine(): we create temporary high priority worker threads,
4738 * but our caller might not have that capability.
4739 */
4740int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4741                               struct sched_param *param)
4742{
4743        return __sched_setscheduler(p, policy, param, false);
4744}
4745
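    /*
     * Sketch (illustrative, 'p' being the target task): promoting a
     * kernel thread to RT from a context without CAP_SYS_NICE, as the
     * stop_machine workers do:
     *
     *      struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
     *
     *      sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
     */
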
4746static int
4747do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4748{
4749        struct sched_param lparam;
4750        struct task_struct *p;
4751        int retval;
4752
4753        if (!param || pid < 0)
4754                return -EINVAL;
4755        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4756                return -EFAULT;
4757
4758        rcu_read_lock();
4759        retval = -ESRCH;
4760        p = find_process_by_pid(pid);
4761        if (p != NULL)
4762                retval = sched_setscheduler(p, policy, &lparam);
4763        rcu_read_unlock();
4764
4765        return retval;
4766}
4767
4768/**
4769 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4770 * @pid: the pid in question.
4771 * @policy: new policy.
4772 * @param: structure containing the new RT priority.
4773 */
4774SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4775                struct sched_param __user *, param)
4776{
4777        /* negative values for policy are not valid */
4778        if (policy < 0)
4779                return -EINVAL;
4780
4781        return do_sched_setscheduler(pid, policy, param);
4782}
4783
4784/**
4785 * sys_sched_setparam - set/change the RT priority of a thread
4786 * @pid: the pid in question.
4787 * @param: structure containing the new RT priority.
4788 */
4789SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4790{
4791        return do_sched_setscheduler(pid, -1, param);
4792}
4793
4794/**
4795 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4796 * @pid: the pid in question.
4797 */
4798SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4799{
4800        struct task_struct *p;
4801        int retval;
4802
4803        if (pid < 0)
4804                return -EINVAL;
4805
4806        retval = -ESRCH;
4807        rcu_read_lock();
4808        p = find_process_by_pid(pid);
4809        if (p) {
4810                retval = security_task_getscheduler(p);
4811                if (!retval)
4812                        retval = p->policy
4813                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4814        }
4815        rcu_read_unlock();
4816        return retval;
4817}
4818
4819/**
4820 * sys_sched_getparam - get the RT priority of a thread
4821 * @pid: the pid in question.
4822 * @param: structure containing the RT priority.
4823 */
4824SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4825{
4826        struct sched_param lp;
4827        struct task_struct *p;
4828        int retval;
4829
4830        if (!param || pid < 0)
4831                return -EINVAL;
4832
4833        rcu_read_lock();
4834        p = find_process_by_pid(pid);
4835        retval = -ESRCH;
4836        if (!p)
4837                goto out_unlock;
4838
4839        retval = security_task_getscheduler(p);
4840        if (retval)
4841                goto out_unlock;
4842
4843        lp.sched_priority = p->rt_priority;
4844        rcu_read_unlock();
4845
4846        /*
4847         * This one might sleep; we cannot do it with a spinlock held ...
4848         */
4849        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4850
4851        return retval;
4852
4853out_unlock:
4854        rcu_read_unlock();
4855        return retval;
4856}
4857
4858long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4859{
4860        cpumask_var_t cpus_allowed, new_mask;
4861        struct task_struct *p;
4862        int retval;
4863
4864        get_online_cpus();
4865        rcu_read_lock();
4866
4867        p = find_process_by_pid(pid);
4868        if (!p) {
4869                rcu_read_unlock();
4870                put_online_cpus();
4871                return -ESRCH;
4872        }
4873
4874        /* Prevent p going away */
4875        get_task_struct(p);
4876        rcu_read_unlock();
4877
4878        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4879                retval = -ENOMEM;
4880                goto out_put_task;
4881        }
4882        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4883                retval = -ENOMEM;
4884                goto out_free_cpus_allowed;
4885        }
4886        retval = -EPERM;
4887        if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
4888                goto out_unlock;
4889
4890        retval = security_task_setscheduler(p, 0, NULL);
4891        if (retval)
4892                goto out_unlock;
4893
4894        cpuset_cpus_allowed(p, cpus_allowed);
4895        cpumask_and(new_mask, in_mask, cpus_allowed);
4896 again:
4897        retval = set_cpus_allowed_ptr(p, new_mask);
4898
4899        if (!retval) {
4900                cpuset_cpus_allowed(p, cpus_allowed);
4901                if (!cpumask_subset(new_mask, cpus_allowed)) {
4902                        /*
4903                         * We must have raced with a concurrent cpuset
4904                         * update. Just reset the cpus_allowed to the
4905                         * cpuset's cpus_allowed
4906                         */
4907                        cpumask_copy(new_mask, cpus_allowed);
4908                        goto again;
4909                }
4910        }
4911out_unlock:
4912        free_cpumask_var(new_mask);
4913out_free_cpus_allowed:
4914        free_cpumask_var(cpus_allowed);
4915out_put_task:
4916        put_task_struct(p);
4917        put_online_cpus();
4918        return retval;
4919}
4920
4921static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4922                             struct cpumask *new_mask)
4923{
4924        if (len < cpumask_size())
4925                cpumask_clear(new_mask);
4926        else if (len > cpumask_size())
4927                len = cpumask_size();
4928
4929        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4930}
4931
4932/**
4933 * sys_sched_setaffinity - set the cpu affinity of a process
4934 * @pid: pid of the process
4935 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4936 * @user_mask_ptr: user-space pointer to the new cpu mask
4937 */
4938SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4939                unsigned long __user *, user_mask_ptr)
4940{
4941        cpumask_var_t new_mask;
4942        int retval;
4943
4944        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4945                return -ENOMEM;
4946
4947        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4948        if (retval == 0)
4949                retval = sched_setaffinity(pid, new_mask);
4950        free_cpumask_var(new_mask);
4951        return retval;
4952}
4953
4954long sched_getaffinity(pid_t pid, struct cpumask *mask)
4955{
4956        struct task_struct *p;
4957        unsigned long flags;
4958        struct rq *rq;
4959        int retval;
4960
4961        get_online_cpus();
4962        rcu_read_lock();
4963
4964        retval = -ESRCH;
4965        p = find_process_by_pid(pid);
4966        if (!p)
4967                goto out_unlock;
4968
4969        retval = security_task_getscheduler(p);
4970        if (retval)
4971                goto out_unlock;
4972
4973        rq = task_rq_lock(p, &flags);
4974        cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
4975        task_rq_unlock(rq, &flags);
4976
4977out_unlock:
4978        rcu_read_unlock();
4979        put_online_cpus();
4980
4981        return retval;
4982}
4983
4984/**
4985 * sys_sched_getaffinity - get the cpu affinity of a process
4986 * @pid: pid of the process
4987 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4988 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4989 */
4990SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4991                unsigned long __user *, user_mask_ptr)
4992{
4993        int ret;
4994        cpumask_var_t mask;
4995
4996        if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4997                return -EINVAL;
4998        if (len & (sizeof(unsigned long)-1))
4999                return -EINVAL;
5000
5001        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5002                return -ENOMEM;
5003
5004        ret = sched_getaffinity(pid, mask);
5005        if (ret == 0) {
5006                size_t retlen = min_t(size_t, len, cpumask_size());
5007
5008                if (copy_to_user(user_mask_ptr, mask, retlen))
5009                        ret = -EFAULT;
5010                else
5011                        ret = retlen;
5012        }
5013        free_cpumask_var(mask);
5014
5015        return ret;
5016}
5017
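    /*
     * Note: on success the raw syscall returns the number of bytes
     * copied to user space (min(len, cpumask_size())), not 0; the C
     * library hides this detail from sched_getaffinity(3) callers.
     */
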
5018/**
5019 * sys_sched_yield - yield the current processor to other threads.
5020 *
5021 * This function yields the current CPU to other tasks. If there are no
5022 * other runnable threads on this CPU, it simply returns without switching.
5023 */
5024SYSCALL_DEFINE0(sched_yield)
5025{
5026        struct rq *rq = this_rq_lock();
5027
5028        schedstat_inc(rq, yld_count);
5029        current->sched_class->yield_task(rq);
5030
5031        /*
5032         * Since we are going to call schedule() anyway, there's
5033         * no need to preempt or enable interrupts:
5034         */
5035        __release(rq->lock);
5036        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
5037        do_raw_spin_unlock(&rq->lock);
5038        preempt_enable_no_resched();
5039
5040        schedule();
5041
5042        return 0;
5043}
5044
5045static inline int should_resched(void)
5046{
5047        return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5048}
5049
5050static void __cond_resched(void)
5051{
5052        add_preempt_count(PREEMPT_ACTIVE);
5053        schedule();
5054        sub_preempt_count(PREEMPT_ACTIVE);
5055}
5056
5057int __sched _cond_resched(void)
5058{
5059        if (should_resched()) {
5060                __cond_resched();
5061                return 1;
5062        }
5063        return 0;
5064}
5065EXPORT_SYMBOL(_cond_resched);
5066
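    /*
     * Usage sketch (generic loop, names are placeholders): long-running
     * loops in process context call the cond_resched() macro, which
     * lands here, to stay latency-friendly on !CONFIG_PREEMPT kernels:
     *
     *      for (i = 0; i < nr; i++) {
     *              ...process item i...
     *              cond_resched();
     *      }
     */
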
5067/*
5068 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
5069 * call schedule, and on return reacquire the lock.
5070 *
5071 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5072 * operations here to prevent schedule() from being called twice (once via
5073 * spin_unlock(), once by hand).
5074 */
5075int __cond_resched_lock(spinlock_t *lock)
5076{
5077        int resched = should_resched();
5078        int ret = 0;
5079
5080        lockdep_assert_held(lock);
5081
5082        if (spin_needbreak(lock) || resched) {
5083                spin_unlock(lock);
5084                if (resched)
5085                        __cond_resched();
5086                else
5087                        cpu_relax();
5088                ret = 1;
5089                spin_lock(lock);
5090        }
5091        return ret;
5092}
5093EXPORT_SYMBOL(__cond_resched_lock);
5094
5095int __sched __cond_resched_softirq(void)
5096{
5097        BUG_ON(!in_softirq());
5098
5099        if (should_resched()) {
5100                local_bh_enable();
5101                __cond_resched();
5102                local_bh_disable();
5103                return 1;
5104        }
5105        return 0;
5106}
5107EXPORT_SYMBOL(__cond_resched_softirq);
5108
5109/**
5110 * yield - yield the current processor to other threads.
5111 *
5112 * This is a shortcut for kernel-space yielding - it marks the
5113 * thread runnable and calls sys_sched_yield().
5114 */
5115void __sched yield(void)
5116{
5117        set_current_state(TASK_RUNNING);
5118        sys_sched_yield();
5119}
5120EXPORT_SYMBOL(yield);
5121
5122/*
5123 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5124 * that process accounting knows that this is a task in IO wait state.
5125 */
5126void __sched io_schedule(void)
5127{
5128        struct rq *rq = raw_rq();
5129
5130        delayacct_blkio_start();
5131        atomic_inc(&rq->nr_iowait);
5132        current->in_iowait = 1;
5133        schedule();
5134        current->in_iowait = 0;
5135        atomic_dec(&rq->nr_iowait);
5136        delayacct_blkio_end();
5137}
5138EXPORT_SYMBOL(io_schedule);
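/*
 * Example (sketch): a classic wait loop that sleeps in iowait until a
 * completion handler sets the (hypothetical) 'done' flag and wakes 'wq'.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (done)
 *			break;
 *		io_schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */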
5139
5140long __sched io_schedule_timeout(long timeout)
5141{
5142        struct rq *rq = raw_rq();
5143        long ret;
5144
5145        delayacct_blkio_start();
5146        atomic_inc(&rq->nr_iowait);
5147        current->in_iowait = 1;
5148        ret = schedule_timeout(timeout);
5149        current->in_iowait = 0;
5150        atomic_dec(&rq->nr_iowait);
5151        delayacct_blkio_end();
5152        return ret;
5153}
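/*
 * Example (sketch): a bounded variant of the above; as with
 * schedule_timeout(), the task state must be set before calling.  The
 * return value is the number of jiffies left if woken early.
 *
 *	long remaining;
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	remaining = io_schedule_timeout(msecs_to_jiffies(100));
 */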
5154
5155/**
5156 * sys_sched_get_priority_max - return maximum RT priority.
5157 * @policy: scheduling policy.
5158 *
5159 * This syscall returns the maximum rt_priority that can be used
5160 * by a given scheduling policy.
5161 */
5162SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5163{
5164        int ret = -EINVAL;
5165
5166        switch (policy) {
5167        case SCHED_FIFO:
5168        case SCHED_RR:
5169                ret = MAX_USER_RT_PRIO-1;
5170                break;
5171        case SCHED_NORMAL:
5172        case SCHED_BATCH:
5173        case SCHED_IDLE:
5174                ret = 0;
5175                break;
5176        }
5177        return ret;
5178}
5179
5180/**
5181 * sys_sched_get_priority_min - return minimum RT priority.
5182 * @policy: scheduling policy.
5183 *
5184 * This syscall returns the minimum rt_priority that can be used
5185 * by a given scheduling policy.
5186 */
5187SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5188{
5189        int ret = -EINVAL;
5190
5191        switch (policy) {
5192        case SCHED_FIFO:
5193        case SCHED_RR:
5194                ret = 1;
5195                break;
5196        case SCHED_NORMAL:
5197        case SCHED_BATCH:
5198        case SCHED_IDLE:
5199                ret = 0;
5200        }
5201        return ret;
5202}
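/*
 * Example (user-space sketch): probing the valid rt_priority range for a
 * policy before calling sched_setscheduler(2).  With the conventional
 * MAX_USER_RT_PRIO of 100 this prints "SCHED_FIFO: 1..99".
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);
 *	printf("SCHED_FIFO: %d..%d\n", lo, hi);
 */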
5203
5204/**
5205 * sys_sched_rr_get_interval - return the default timeslice of a process.
5206 * @pid: pid of the process.
5207 * @interval: userspace pointer to the timeslice value.
5208 *
5209 * This syscall writes the default timeslice value of a given process
5210 * into the user-space timespec buffer. A value of '0' means infinity.
5211 */
5212SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5213                struct timespec __user *, interval)
5214{
5215        struct task_struct *p;
5216        unsigned int time_slice;
5217        unsigned long flags;
5218        struct rq *rq;
5219        int retval;
5220        struct timespec t;
5221
5222        if (pid < 0)
5223                return -EINVAL;
5224
5225        retval = -ESRCH;
5226        rcu_read_lock();
5227        p = find_process_by_pid(pid);
5228        if (!p)
5229                goto out_unlock;
5230
5231        retval = security_task_getscheduler(p);
5232        if (retval)
5233                goto out_unlock;
5234
5235        rq = task_rq_lock(p, &flags);
5236        time_slice = p->sched_class->get_rr_interval(rq, p);
5237        task_rq_unlock(rq, &flags);
5238
5239        rcu_read_unlock();
5240        jiffies_to_timespec(time_slice, &t);
5241        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5242        return retval;
5243
5244out_unlock:
5245        rcu_read_unlock();
5246        return retval;
5247}
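/*
 * Example (user-space sketch): pid 0 means the calling process, so a
 * SCHED_RR task can read its own timeslice like this:
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n", ts.tv_sec, ts.tv_nsec);
 */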
5248
5249static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5250
5251void sched_show_task(struct task_struct *p)
5252{
5253        unsigned long free = 0;
5254        unsigned state;
5255
5256        state = p->state ? __ffs(p->state) + 1 : 0;
5257        printk(KERN_INFO "%-13.13s %c", p->comm,
5258                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5259#if BITS_PER_LONG == 32
5260        if (state == TASK_RUNNING)
5261                printk(KERN_CONT " running  ");
5262        else
5263                printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5264#else
5265        if (state == TASK_RUNNING)
5266                printk(KERN_CONT "  running task    ");
5267        else
5268                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5269#endif
5270#ifdef CONFIG_DEBUG_STACK_USAGE
5271        free = stack_not_used(p);
5272#endif
5273        printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
5274                task_pid_nr(p), task_pid_nr(p->real_parent),
5275                (unsigned long)task_thread_info(p)->flags);
5276
5277        show_stack(p, NULL);
5278}
5279
5280void show_state_filter(unsigned long state_filter)
5281{
5282        struct task_struct *g, *p;
5283
5284#if BITS_PER_LONG == 32
5285        printk(KERN_INFO
5286                "  task                PC stack   pid father\n");
5287#else
5288        printk(KERN_INFO
5289                "  task                        PC stack   pid father\n");
5290#endif
5291        read_lock(&tasklist_lock);
5292        do_each_thread(g, p) {
5293                /*
5294                 * reset the NMI-timeout, listing all tasks on a slow
5295                 * console might take a lot of time:
5296                 */
5297                touch_nmi_watchdog();
5298                if (!state_filter || (p->state & state_filter))
5299                        sched_show_task(p);
5300        } while_each_thread(g, p);
5301
5302        touch_all_softlockup_watchdogs();
5303
5304#ifdef CONFIG_SCHED_DEBUG
5305        sysrq_sched_debug_show();
5306#endif
5307        read_unlock(&tasklist_lock);
5308        /*
5309         * Only show locks if all tasks are dumped:
5310         */
5311        if (!state_filter)
5312                debug_show_all_locks();
5313}
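/*
 * Example (sketch): the sysrq 'w' handler dumps only blocked tasks via
 *
 *	show_state_filter(TASK_UNINTERRUPTIBLE);
 *
 * while the show_state() wrapper passes 0 and therefore dumps every task
 * (and, per the check above, the held locks as well).
 */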
5314
5315void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5316{
5317        idle->sched_class = &idle_sched_class;
5318}
5319
5320/**
5321 * init_idle - set up an idle thread for a given CPU
5322 * @idle: task in question
5323 * @cpu: cpu the idle task belongs to
5324 *
5325 * NOTE: this function does not set the idle thread's NEED_RESCHED
5326 * flag, to make booting more robust.
5327 */
5328void __cpuinit init_idle(struct task_struct *idle, int cpu)
5329{
5330        struct rq *rq = cpu_rq(cpu);
5331        unsigned long flags;
5332
5333        raw_spin_lock_irqsave(&rq->lock, flags);
5334
5335        __sched_fork(idle);
5336        idle->state = TASK_RUNNING;
5337        idle->se.exec_start = sched_clock();
5338
5339        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
5340        __set_task_cpu(idle, cpu);
5341
5342        rq->curr = rq->idle = idle;
5343#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5344        idle->oncpu = 1;
5345#endif
5346        raw_spin_unlock_irqrestore(&rq->lock, flags);
5347
5348        /* Set the preempt count _outside_ the spinlocks! */
5349#if defined(CONFIG_PREEMPT)
5350        task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5351#else
5352        task_thread_info(idle)->preempt_count = 0;
5353#endif
5354        /*
5355         * The idle tasks have their own, simple scheduling class:
5356         */
5357        idle->sched_class = &idle_sched_class;
5358        ftrace_graph_init_task(idle);
5359}
5360
5361/*
5362 * In a system that switches off the HZ timer, nohz_cpu_mask
5363 * indicates which cpus have entered this state. This is used
5364 * by the RCU update code to wait only for active cpus. On systems
5365 * that do not switch off the HZ timer, nohz_cpu_mask should
5366 * always be CPU_BITS_NONE.
5367 */
5368cpumask_var_t nohz_cpu_mask;
5369
5370/*
5371 * Increase the granularity value when there are more CPUs,
5372 * because with more CPUs the 'effective latency' as visible
5373 * to users decreases. But the relationship is not linear,
5374 * so pick a second-best guess by going with the log2 of the
5375 * number of CPUs.
5376 *
5377 * This idea comes from the SD scheduler of Con Kolivas:
5378 */
5379static int get_update_sysctl_factor(void)
5380{
5381        unsigned int cpus = min_t(int, num_online_cpus(), 8);
5382        unsigned int factor;
5383
5384        switch (sysctl_sched_tunable_scaling) {
5385        case SCHED_TUNABLESCALING_NONE:
5386                factor = 1;
5387                break;
5388        case SCHED_TUNABLESCALING_LINEAR:
5389                factor = cpus;
5390                break;
5391        case SCHED_TUNABLESCALING_LOG:
5392        default:
5393                factor = 1 + ilog2(cpus);
5394                break;
5395        }
5396
5397        return factor;
5398}
5399
5400static void update_sysctl(void)
5401{
5402        unsigned int factor = get_update_sysctl_factor();
5403
5404#define SET_SYSCTL(name) \
5405        (sysctl_##name = (factor) * normalized_sysctl_##name)
5406        SET_SYSCTL(sched_min_granularity);
5407        SET_SYSCTL(sched_latency);
5408        SET_SYSCTL(sched_wakeup_granularity);
5409        SET_SYSCTL(sched_shares_ratelimit);
5410#undef SET_SYSCTL
5411}
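/*
 * Worked example (assuming the usual defaults): on a 16-CPU machine with
 * SCHED_TUNABLESCALING_LOG, cpus is clamped to 8 above, so
 * factor = 1 + ilog2(8) = 4, and a normalized sched_latency of 6ms is
 * scaled to 24ms.
 */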
5412
5413static inline void sched_init_granularity(void)
5414{
5415        update_sysctl();
5416}
5417
5418#ifdef CONFIG_SMP
5419/*
5420 * This is how migration works:
5421 *
5422 * 1) we invoke migration_cpu_stop() on the target CPU using
5423 *    stop_one_cpu().
5424 * 2) stopper starts to run (implicitly forcing the migrated thread
5425 *    off the CPU)
5426 * 3) it checks whether the migrated task is still in the wrong runqueue.
5427 * 4) if it's in the wrong runqueue then the stopper removes
5428 *    it and puts it into the right queue.
5429 * 5) stopper completes and stop_one_cpu() returns and the migration
5430 *    is done.
5431 */
5432
5433/*
5434 * Change a given task's CPU affinity. Migrate the thread to a
5435 * proper CPU and schedule it away if the CPU it's executing on
5436 * is removed from the allowed bitmask.
5437 *
5438 * NOTE: the caller must have a valid reference to the task, the
5439 * task must not exit() & deallocate itself prematurely. The
5440 * call is not atomic; no spinlocks may be held.
5441 */
5442int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5443{
5444        unsigned long flags;
5445        struct rq *rq;
5446        unsigned int dest_cpu;
5447        int ret = 0;
5448
5449        /*
5450         * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task()
5451         * can drop the rq->lock and still rely on ->cpus_allowed.
5452         */
5453again:
5454        while (task_is_waking(p))
5455                cpu_relax();
5456        rq = task_rq_lock(p, &flags);
5457        if (task_is_waking(p)) {
5458                task_rq_unlock(rq, &flags);
5459                goto again;
5460        }
5461
5462        if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5463                ret = -EINVAL;
5464                goto out;
5465        }
5466
5467        if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
5468                     !cpumask_equal(&p->cpus_allowed, new_mask))) {
5469                ret = -EINVAL;
5470                goto out;
5471        }
5472
5473        if (p->sched_class->set_cpus_allowed)
5474                p->sched_class->set_cpus_allowed(p, new_mask);
5475        else {
5476                cpumask_copy(&p->cpus_allowed, new_mask);
5477                p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
5478        }
5479
5480        /* Can the task run on the task's current CPU? If so, we're done */
5481        if (cpumask_test_cpu(task_cpu(p), new_mask))
5482                goto out;
5483
5484        dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5485        if (migrate_task(p, dest_cpu)) {
5486                struct migration_arg arg = { p, dest_cpu };
5487                /* Need help from migration thread: drop lock and wait. */
5488                task_rq_unlock(rq, &flags);
5489                stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5490                tlb_migrate_finish(p->mm);
5491                return 0;
5492        }
5493out:
5494        task_rq_unlock(rq, &flags);
5495
5496        return ret;
5497}
5498EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
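/*
 * Example (sketch): pinning a task to CPU 2; 'tsk' is a hypothetical
 * task_struct pointer on which the caller holds a reference, as required
 * by the rules above.
 *
 *	if (set_cpus_allowed_ptr(tsk, cpumask_of(2)))
 *		printk(KERN_WARNING "could not pin task to CPU 2\n");
 */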
5499
5500/*
5501 * Move a (non-current) task off this cpu, onto the dest cpu. We do
5502 * this either because it can no longer run here (set_cpus_allowed()
5503 * moved it away from this CPU, or the CPU is going down), or we're
5504 * attempting to rebalance this task on exec (sched_exec).
5505 *
5506 * So we race with normal scheduler movements, but that's OK, as long
5507 * as the task is no longer on this CPU.
5508 *
5509 * Returns non-zero if task was successfully migrated.
5510 */
5511static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5512{
5513        struct rq *rq_dest, *rq_src;
5514        int ret = 0;
5515
5516        if (unlikely(!cpu_active(dest_cpu)))
5517                return ret;
5518
5519        rq_src = cpu_rq(src_cpu);
5520        rq_dest = cpu_rq(dest_cpu);
5521
5522        double_rq_lock(rq_src, rq_dest);
5523        /* Already moved. */
5524        if (task_cpu(p) != src_cpu)
5525                goto done;
5526        /* Affinity changed (again). */
5527        if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
5528                goto fail;
5529
5530        /*
5531         * If we're not on a rq, the next wake-up will ensure we're
5532         * placed properly.
5533         */
5534        if (p->se.on_rq) {
5535                deactivate_task(rq_src, p, 0);
5536                set_task_cpu(p, dest_cpu);
5537                activate_task(rq_dest, p, 0);
5538                check_preempt_curr(rq_dest, p, 0);
5539        }
5540done:
5541        ret = 1;
5542fail:
5543        double_rq_unlock(rq_src, rq_dest);
5544        return ret;
5545}
5546
5547/*
5548 * migration_cpu_stop - this will be executed by a highprio stopper thread
5549 * and performs thread migration by bumping the thread off the CPU,
5550 * then 'pushing' it onto another runqueue.
5551 */
5552static int migration_cpu_stop(void *data)
5553{
5554        struct migration_arg *arg = data;
5555
5556        /*
5557         * The original target cpu might have gone down and we might
5558         * be on another cpu but it doesn't matter.
5559         */
5560        local_irq_disable();
5561        __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5562        local_irq_enable();
5563        return 0;
5564}
5565
5566#ifdef CONFIG_HOTPLUG_CPU
5567/*
5568 * Figure out where task on dead CPU should go, use force if necessary.
5569 */
5570void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5571{
5572        struct rq *rq = cpu_rq(dead_cpu);
5573        int needs_cpu, uninitialized_var(dest_cpu);
5574        unsigned long flags;
5575
5576        local_irq_save(flags);
5577
5578        raw_spin_lock(&rq->lock);
5579        needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
5580        if (needs_cpu)
5581                dest_cpu = select_fallback_rq(dead_cpu, p);
5582        raw_spin_unlock(&rq->lock);
5583        /*
5584         * It can only fail if we race with set_cpus_allowed(),
5585         * in which case the racer should migrate the task anyway.
5586         */
5587        if (needs_cpu)
5588                __migrate_task(p, dead_cpu, dest_cpu);
5589        local_irq_restore(flags);
5590}
5591
5592/*
5593 * While a dead CPU has no uninterruptible tasks queued at this point,
5594 * it might still have a nonzero ->nr_uninterruptible counter, because
5595 * for performance reasons the counter is not strictly tracking tasks to
5596 * their home CPUs. So we just add the counter to another CPU's counter,
5597 * to keep the global sum constant after CPU-down:
5598 */
5599static void migrate_nr_uninterruptible(struct rq *rq_src)
5600{
5601        struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
5602        unsigned long flags;
5603
5604        local_irq_save(flags);
5605        double_rq_lock(rq_src, rq_dest);
5606        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5607        rq_src->nr_uninterruptible = 0;
5608        double_rq_unlock(rq_src, rq_dest);
5609        local_irq_restore(flags);
5610}
5611
5612/* Run through task list and migrate tasks from the dead cpu. */
5613static void migrate_live_tasks(int src_cpu)
5614{
5615        struct task_struct *p, *t;
5616
5617        read_lock(&tasklist_lock);
5618
5619        do_each_thread(t, p) {
5620                if (p == current)
5621                        continue;
5622
5623                if (task_cpu(p) == src_cpu)
5624                        move_task_off_dead_cpu(src_cpu, p);
5625        } while_each_thread(t, p);
5626
5627        read_unlock(&tasklist_lock);
5628}
5629
5630/*
5631 * Schedules the idle task to be the next runnable task on the current CPU.
5632 * It does so by boosting its priority to the highest possible.
5633 * Used by CPU offline code.
5634 */
5635void sched_idle_next(void)
5636{
5637        int this_cpu = smp_processor_id();
5638        struct rq *rq = cpu_rq(this_cpu);
5639        struct task_struct *p = rq->idle;
5640        unsigned long flags;
5641
5642        /* cpu has to be offline */
5643        BUG_ON(cpu_online(this_cpu));
5644
5645        /*
5646         * Strictly not necessary, since the rest of the CPUs are stopped
5647         * by now and interrupts are disabled on the current cpu.
5648         */
5649        raw_spin_lock_irqsave(&rq->lock, flags);
5650
5651        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5652
5653        activate_task(rq, p, 0);
5654
5655        raw_spin_unlock_irqrestore(&rq->lock, flags);
5656}
5657
5658/*
5659 * Ensures that the idle task is using init_mm right before its cpu goes
5660 * offline.
5661 */
5662void idle_task_exit(void)
5663{
5664        struct mm_struct *mm = current->active_mm;
5665
5666        BUG_ON(cpu_online(smp_processor_id()));
5667
5668        if (mm != &init_mm)
5669                switch_mm(mm, &init_mm, current);
5670        mmdrop(mm);
5671}
5672
5673/* called under rq->lock with disabled interrupts */
5674static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
5675{
5676        struct rq *rq = cpu_rq(dead_cpu);
5677
5678        /* Must be exiting, otherwise would be on tasklist. */
5679        BUG_ON(!p->exit_state);
5680
5681        /* Cannot have done final schedule yet: would have vanished. */
5682        BUG_ON(p->state == TASK_DEAD);
5683
5684        get_task_struct(p);
5685
5686        /*
5687         * Drop lock around migration; if someone else moves it,
5688         * that's OK. No task can be added to this CPU, so iteration is
5689         * fine.
5690         */
5691        raw_spin_unlock_irq(&rq->lock);
5692        move_task_off_dead_cpu(dead_cpu, p);
5693        raw_spin_lock_irq(&rq->lock);
5694
5695        put_task_struct(p);
5696}
5697
5698/* release_task() removes task from tasklist, so we won't find dead tasks. */
5699static void migrate_dead_tasks(unsigned int dead_cpu)
5700{
5701        struct rq *rq = cpu_rq(dead_cpu);
5702        struct task_struct *next;
5703
5704        for ( ; ; ) {
5705                if (!rq->nr_running)
5706                        break;
5707                next = pick_next_task(rq);
5708                if (!next)
5709                        break;
5710                next->sched_class->put_prev_task(rq, next);
5711                migrate_dead(dead_cpu, next);
5712
5713        }
5714}
5715
5716/*
5717 * Remove the tasks which were accounted by rq from calc_load_tasks.
5718 */
5719static void calc_global_load_remove(struct rq *rq)
5720{
5721        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
5722        rq->calc_load_active = 0;
5723}
5724#endif /* CONFIG_HOTPLUG_CPU */
5725
5726#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5727
5728static struct ctl_table sd_ctl_dir[] = {
5729        {
5730                .procname       = "sched_domain",
5731                .mode           = 0555,
5732        },
5733        {}
5734};
5735
5736static struct ctl_table sd_ctl_root[] = {
5737        {
5738                .procname       = "kernel",
5739                .mode           = 0555,
5740                .child          = sd_ctl_dir,
5741        },
5742        {}
5743};
5744
5745static struct ctl_table *sd_alloc_ctl_entry(int n)
5746{
5747        struct ctl_table *entry =
5748                kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5749
5750        return entry;
5751}
5752
5753static void sd_free_ctl_entry(struct ctl_table **tablep)
5754{
5755        struct ctl_table *entry;
5756
5757        /*
5758         * In the intermediate directories, both the child directory and
5759         * procname are dynamically allocated (and may be NULL), but the mode
5760         * will always be set. In the lowest directory the names are
5761         * static strings and all have proc handlers.
5762         */
5763        for (entry = *tablep; entry->mode; entry++) {
5764                if (entry->child)
5765                        sd_free_ctl_entry(&entry->child);
5766                if (entry->proc_handler == NULL)
5767                        kfree(entry->procname);
5768        }
5769
5770        kfree(*tablep);
5771        *tablep = NULL;
5772}
5773
5774static void
5775set_table_entry(struct ctl_table *entry,
5776                const char *procname, void *data, int maxlen,
5777                mode_t mode, proc_handler *proc_handler)
5778{
5779        entry->procname = procname;
5780        entry->data = data;
5781        entry->maxlen = maxlen;
5782        entry->mode = mode;
5783        entry->proc_handler = proc_handler;
5784}
5785
5786static struct ctl_table *
5787sd_alloc_ctl_domain_table(struct sched_domain *sd)
5788{
5789        struct ctl_table *table = sd_alloc_ctl_entry(13);
5790
5791        if (table == NULL)
5792                return NULL;
5793
5794        set_table_entry(&table[0], "min_interval", &sd->min_interval,
5795                sizeof(long), 0644, proc_doulongvec_minmax);
5796        set_table_entry(&table[1], "max_interval", &sd->max_interval,
5797                sizeof(long), 0644, proc_doulongvec_minmax);
5798        set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5799                sizeof(int), 0644, proc_dointvec_minmax);
5800        set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5801                sizeof(int), 0644, proc_dointvec_minmax);
5802        set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5803                sizeof(int), 0644, proc_dointvec_minmax);
5804        set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5805                sizeof(int), 0644, proc_dointvec_minmax);
5806        set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5807                sizeof(int), 0644, proc_dointvec_minmax);
5808        set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5809                sizeof(int), 0644, proc_dointvec_minmax);
5810        set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5811                sizeof(int), 0644, proc_dointvec_minmax);
5812        set_table_entry(&table[9], "cache_nice_tries",
5813                &sd->cache_nice_tries,
5814                sizeof(int), 0644, proc_dointvec_minmax);
5815        set_table_entry(&table[10], "flags", &sd->flags,
5816                sizeof(int), 0644, proc_dointvec_minmax);
5817        set_table_entry(&table[11], "name", sd->name,
5818                CORENAME_MAX_SIZE, 0444, proc_dostring);
5819        /* &table[12] is terminator */
5820
5821        return table;
5822}
5823
5824static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5825{
5826        struct ctl_table *entry, *table;
5827        struct sched_domain *sd;
5828        int domain_num = 0, i;
5829        char buf[32];
5830
5831        for_each_domain(cpu, sd)
5832                domain_num++;
5833        entry = table = sd_alloc_ctl_entry(domain_num + 1);
5834        if (table == NULL)
5835                return NULL;
5836
5837        i = 0;
5838        for_each_domain(cpu, sd) {
5839                snprintf(buf, 32, "domain%d", i);
5840                entry->procname = kstrdup(buf, GFP_KERNEL);
5841                entry->mode = 0555;
5842                entry->child = sd_alloc_ctl_domain_table(sd);
5843                entry++;
5844                i++;
5845        }
5846        return table;
5847}
5848
5849static struct ctl_table_header *sd_sysctl_header;
5850static void register_sched_domain_sysctl(void)
5851{
5852        int i, cpu_num = num_possible_cpus();
5853        struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5854        char buf[32];
5855
5856        WARN_ON(sd_ctl_dir[0].child);
5857        sd_ctl_dir[0].child = entry;
5858
5859        if (entry == NULL)
5860                return;
5861
5862        for_each_possible_cpu(i) {
5863                snprintf(buf, 32, "cpu%d", i);
5864                entry->procname = kstrdup(buf, GFP_KERNEL);
5865                entry->mode = 0555;
5866                entry->child = sd_alloc_ctl_cpu_table(i);
5867                entry++;
5868        }
5869
5870        WARN_ON(sd_sysctl_header);
5871        sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5872}
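/*
 * The result is a per-cpu, per-domain tree under /proc/sys/kernel/,
 * for instance (sketch of a typical two-level layout):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/flags
 */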
5873
5874/* may be called multiple times per registration */
5875static void unregister_sched_domain_sysctl(void)
5876{
5877        if (sd_sysctl_header)
5878                unregister_sysctl_table(sd_sysctl_header);
5879        sd_sysctl_header = NULL;
5880        if (sd_ctl_dir[0].child)
5881                sd_free_ctl_entry(&sd_ctl_dir[0].child);
5882}
5883#else
5884static void register_sched_domain_sysctl(void)
5885{
5886}
5887static void unregister_sched_domain_sysctl(void)
5888{
5889}
5890#endif
5891
5892static void set_rq_online(struct rq *rq)
5893{
5894        if (!rq->online) {
5895                const struct sched_class *class;
5896
5897                cpumask_set_cpu(rq->cpu, rq->rd->online);
5898                rq->online = 1;
5899
5900                for_each_class(class) {
5901                        if (class->rq_online)
5902                                class->rq_online(rq);
5903                }
5904        }
5905}
5906
5907static void set_rq_offline(struct rq *rq)
5908{
5909        if (rq->online) {
5910                const struct sched_class *class;
5911
5912                for_each_class(class) {
5913                        if (class->rq_offline)
5914                                class->rq_offline(rq);
5915                }
5916
5917                cpumask_clear_cpu(rq->cpu, rq->rd->online);
5918                rq->online = 0;
5919        }
5920}
5921
5922/*
5923 * migration_call - callback that gets triggered when a CPU comes up or
5924 * goes down. Here we perform the necessary runqueue setup and teardown.
5925 */
5926static int __cpuinit
5927migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5928{
5929        int cpu = (long)hcpu;
5930        unsigned long flags;
5931        struct rq *rq = cpu_rq(cpu);
5932
5933        switch (action) {
5934
5935        case CPU_UP_PREPARE:
5936        case CPU_UP_PREPARE_FROZEN:
5937                rq->calc_load_update = calc_load_update;
5938                break;
5939
5940        case CPU_ONLINE:
5941        case CPU_ONLINE_FROZEN:
5942                /* Update our root-domain */
5943                raw_spin_lock_irqsave(&rq->lock, flags);
5944                if (rq->rd) {
5945                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5946
5947                        set_rq_online(rq);
5948                }
5949                raw_spin_unlock_irqrestore(&rq->lock, flags);
5950                break;
5951
5952#ifdef CONFIG_HOTPLUG_CPU
5953        case CPU_DEAD:
5954        case CPU_DEAD_FROZEN:
5955                migrate_live_tasks(cpu);
5956                /* Idle task back to normal (off runqueue, low prio) */
5957                raw_spin_lock_irq(&rq->lock);
5958                deactivate_task(rq, rq->idle, 0);
5959                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5960                rq->idle->sched_class = &idle_sched_class;
5961                migrate_dead_tasks(cpu);
5962                raw_spin_unlock_irq(&rq->lock);
5963                migrate_nr_uninterruptible(rq);
5964                BUG_ON(rq->nr_running != 0);
5965                calc_global_load_remove(rq);
5966                break;
5967
5968        case CPU_DYING:
5969        case CPU_DYING_FROZEN:
5970                /* Update our root-domain */
5971                raw_spin_lock_irqsave(&rq->lock, flags);
5972                if (rq->rd) {
5973                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5974                        set_rq_offline(rq);
5975                }
5976                raw_spin_unlock_irqrestore(&rq->lock, flags);
5977                break;
5978#endif
5979        }
5980        return NOTIFY_OK;
5981}
5982
5983/*
5984 * Register at high priority so that task migration (migrate_all_tasks)
5985 * happens before everything else.  This has to be lower priority than
5986 * the notifier in the perf_event subsystem, though.
5987 */
5988static struct notifier_block __cpuinitdata migration_notifier = {
5989        .notifier_call = migration_call,
5990        .priority = CPU_PRI_MIGRATION,
5991};
5992
5993static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5994                                      unsigned long action, void *hcpu)
5995{
5996        switch (action & ~CPU_TASKS_FROZEN) {
5997        case CPU_ONLINE:
5998        case CPU_DOWN_FAILED:
5999                set_cpu_active((long)hcpu, true);
6000                return NOTIFY_OK;
6001        default:
6002                return NOTIFY_DONE;
6003        }
6004}
6005
6006static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6007                                        unsigned long action, void *hcpu)
6008{
6009        switch (action & ~CPU_TASKS_FROZEN) {
6010        case CPU_DOWN_PREPARE:
6011                set_cpu_active((long)hcpu, false);
6012                return NOTIFY_OK;
6013        default:
6014                return NOTIFY_DONE;
6015        }
6016}
6017
6018static int __init migration_init(void)
6019{
6020        void *cpu = (void *)(long)smp_processor_id();
6021        int err;
6022
6023        /* Initialize migration for the boot CPU */
6024        err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6025        BUG_ON(err == NOTIFY_BAD);
6026        migration_call(&migration_notifier, CPU_ONLINE, cpu);
6027        register_cpu_notifier(&migration_notifier);
6028
6029        /* Register cpu active notifiers */
6030        cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6031        cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6032
6033        return 0;
6034}
6035early_initcall(migration_init);
6036#endif
6037
6038#ifdef CONFIG_SMP
6039
6040#ifdef CONFIG_SCHED_DEBUG
6041
6042static __read_mostly int sched_domain_debug_enabled;
6043
6044static int __init sched_domain_debug_setup(char *str)
6045{
6046        sched_domain_debug_enabled = 1;
6047
6048        return 0;
6049}
6050early_param("sched_debug", sched_domain_debug_setup);
6051
6052static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6053                                  struct cpumask *groupmask)
6054{
6055        struct sched_group *group = sd->groups;
6056        char str[256];
6057
6058        cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
6059        cpumask_clear(groupmask);
6060
6061        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6062
6063        if (!(sd->flags & SD_LOAD_BALANCE)) {
6064                printk("does not load-balance\n");
6065                if (sd->parent)
6066                        printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6067                                        " has parent\n");
6068                return -1;
6069        }
6070
6071        printk(KERN_CONT "span %s level %s\n", str, sd->name);
6072
6073        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6074                printk(KERN_ERR "ERROR: domain->span does not contain "
6075                                "CPU%d\n", cpu);
6076        }
6077        if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
6078                printk(KERN_ERR "ERROR: domain->groups does not contain"
6079                                " CPU%d\n", cpu);
6080        }
6081
6082        printk(KERN_DEBUG "%*s groups:", level + 1, "");
6083        do {
6084                if (!group) {
6085                        printk("\n");
6086                        printk(KERN_ERR "ERROR: group is NULL\n");
6087                        break;
6088                }
6089
6090                if (!group->cpu_power) {
6091                        printk(KERN_CONT "\n");
6092                        printk(KERN_ERR "ERROR: domain->cpu_power not "
6093                                        "set\n");
6094                        break;
6095                }
6096
6097                if (!cpumask_weight(sched_group_cpus(group))) {
6098