linux/kernel/sched.c
/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <asm/tlb.h>

#include <asm/unistd.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
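
/*
 * For illustration, assuming the stock values MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140 from <linux/sched.h>:
 *
 *   NICE_TO_PRIO(-20) == 100,  NICE_TO_PRIO(0) == 120,  NICE_TO_PRIO(19) == 139
 *   PRIO_TO_NICE(120) == 0
 *   USER_PRIO(120) == 20, and MAX_USER_PRIO == USER_PRIO(140) == 40
 */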

/*
 * Some helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
 * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 * Timeslices get refilled after they expire.
 */
#define MIN_TIMESLICE           max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE           (100 * HZ / 1000)
#define ON_RUNQUEUE_WEIGHT       30
#define CHILD_PENALTY            95
#define PARENT_PENALTY          100
#define EXIT_WEIGHT               3
#define PRIO_BONUS_RATIO         25
#define MAX_BONUS               (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
#define INTERACTIVE_DELTA         2
#define MAX_SLEEP_AVG           (DEF_TIMESLICE * MAX_BONUS)
#define STARVATION_LIMIT        (MAX_SLEEP_AVG)
#define NS_MAX_SLEEP_AVG        (JIFFIES_TO_NS(MAX_SLEEP_AVG))
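
/*
 * For illustration, plugging in HZ == 1000 together with the
 * MAX_USER_PRIO == 40 assumption above:
 *
 *   MAX_BONUS     == 40 * 25 / 100 == 10
 *   DEF_TIMESLICE == 100 jiffies   == 100 ms
 *   MAX_SLEEP_AVG == 100 * 10      == 1000 jiffies
 *   NS_MAX_SLEEP_AVG               == 1,000,000,000 ns
 *
 * i.e. a task's sleep_avg saturates after about one second of sleep.
 */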

/*
 * If a task is 'interactive' then we reinsert it in the active
 * array after it has expired its current timeslice. (it will not
 * continue to run immediately, it will still round-robin with
 * other interactive tasks.)
 *
 * This part scales the interactivity limit depending on niceness.
 *
 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
 * Here are a few examples of different nice levels:
 *
 *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
 *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
 *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
 *
 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
 *  priority range a task can explore, a value of '1' means the
 *  task is rated interactive.)
 *
 * I.e. nice +19 tasks can never get 'interactive' enough to be
 * reinserted into the active array. And only heavily CPU-bound nice -20
 * tasks will be expired. Default nice 0 tasks are somewhere in between:
 * it takes some effort for them to get interactive, but it's not
 * too hard.
 */

#define CURRENT_BONUS(p) \
        (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
                MAX_SLEEP_AVG)

#define GRANULARITY     (10 * HZ / 1000 ? : 1)

#ifdef CONFIG_SMP
#define TIMESLICE_GRANULARITY(p)        (GRANULARITY * \
                (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
                        num_online_cpus())
#else
#define TIMESLICE_GRANULARITY(p)        (GRANULARITY * \
                (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
#endif

#define SCALE(v1,v1_max,v2_max) \
        (v1) * (v2_max) / (v1_max)

#define DELTA(p) \
        (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
                INTERACTIVE_DELTA)

#define TASK_INTERACTIVE(p) \
        ((p)->prio <= (p)->static_prio - DELTA(p))
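
/*
 * For illustration, with MAX_BONUS == 10 (see the assumption above),
 * a nice 0 task has:
 *
 *   DELTA(p) == SCALE(20, 40, 10) - 20 * 10 / 40 + 2 == 5 - 5 + 2 == 2
 *
 * so it is rated interactive once its dynamic priority is at least two
 * levels better than its static priority: the bonus levels -5..-2,
 * matching the four '1's in the TASK_INTERACTIVE(0) row above.
 */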

#define INTERACTIVE_SLEEP(p) \
        (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
                (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))

#define TASK_PREEMPTS_CURR(p, rq) \
        ((p)->prio < (rq)->curr->prio)

#define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
        if (static_prio < NICE_TO_PRIO(0))
                return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
        else
                return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
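
/*
 * For illustration, with MAX_PRIO == 140, MAX_USER_PRIO == 40 and
 * HZ == 1000 (assumptions, see above), the divisor is 20 and:
 *
 *   nice -20 (prio 100): SCALE_PRIO(400, 100) == 400 * 40 / 20 == 800 ms
 *   nice   0 (prio 120): SCALE_PRIO(100, 120) == 100 * 20 / 20 == 100 ms
 *   nice +19 (prio 139): 100 * 1 / 20 == 5  ->  MIN_TIMESLICE == 5 ms
 *
 * which is exactly the [800ms ... 100ms ... 5ms] range quoted below.
 */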

/*
 * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
 * to time slice values: [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */

static inline unsigned int task_timeslice(struct task_struct *p)
{
        return static_prio_timeslice(p->static_prio);
}

/*
 * These are the runqueue data structures:
 */

struct prio_array {
        unsigned int nr_active;
        DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_PRIO];
};
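
/*
 * This pair of arrays is what makes picking the next task O(1): find
 * the first set bit in the MAX_PRIO-bit priority bitmap (140 bits with
 * the values assumed above), then take the head of that priority's
 * list. A sketch of the lookup (the schedule() implementation lives
 * further down in the full file):
 *
 *      idx = sched_find_first_bit(array->bitmap);
 *      queue = array->queue + idx;
 *      next = list_entry(queue->next, struct task_struct, run_list);
 *
 * The delimiter bit mentioned above is kept permanently set at init
 * time so the bitmap search always terminates.
 */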

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as
 * the load balancing or the thread migration code) must acquire the
 * locks in ascending &runqueue order.
 */
struct rq {
        spinlock_t lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
        unsigned long raw_weighted_load;
#ifdef CONFIG_SMP
        unsigned long cpu_load[3];
#endif
        unsigned long long nr_switches;

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        unsigned long expired_timestamp;
        unsigned long long timestamp_last_tick;
        struct task_struct *curr, *idle;
        struct mm_struct *prev_mm;
        struct prio_array *active, *expired, arrays[2];
        int best_expired_prio;
        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct sched_domain *sd;

        /* For active balancing */
        int active_balance;
        int push_cpu;
        int cpu;                /* cpu of this runqueue */

        struct task_struct *migration_thread;
        struct list_head migration_queue;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;

        /* sys_sched_yield() stats */
        unsigned long yld_exp_empty;
        unsigned long yld_act_empty;
        unsigned long yld_both_empty;
        unsigned long yld_cnt;

        /* schedule() stats */
        unsigned long sched_switch;
        unsigned long sched_cnt;
        unsigned long sched_goidle;

        /* try_to_wake_up() stats */
        unsigned long ttwu_cnt;
        unsigned long ttwu_local;
#endif
        struct lock_class_key rq_lock_key;
};

static DEFINE_PER_CPU(struct rq, runqueues);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)      do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)       do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
        return rq->curr == p;
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->oncpu;
#else
        return rq->curr == p;
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        /*
         * We can optimise this out completely for !SMP, because the
         * SMP rebalancing from interrupt is the only thing that cares
         * here.
         */
        next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        spin_unlock_irq(&rq->lock);
#else
        spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * After ->oncpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         */
        smp_wmb();
        prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

repeat_lock_task:
        rq = task_rq(p);
        spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
                spin_unlock(&rq->lock);
                goto repeat_lock_task;
        }
        return rq;
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts.  Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
{
        struct rq *rq;

repeat_lock_task:
        local_irq_save(*flags);
        rq = task_rq(p);
        spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
                spin_unlock_irqrestore(&rq->lock, *flags);
                goto repeat_lock_task;
        }
        return rq;
}

static inline void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
{
        spin_unlock_irqrestore(&rq->lock, *flags);
}
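
/*
 * The lock/recheck/retry loops above are needed because p can be
 * migrated to another runqueue between looking up task_rq(p) and
 * actually taking that runqueue's lock. Typical usage, as seen
 * throughout this file:
 *
 *      unsigned long flags;
 *      struct rq *rq = task_rq_lock(p, &flags);
 *      ...operate on p's runqueue...
 *      task_rq_unlock(rq, &flags);
 */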

#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 12

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;

        seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
        seq_printf(seq, "timestamp %lu\n", jiffies);
        for_each_online_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcnt = 0;
#endif

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
                    cpu, rq->yld_both_empty,
                    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
                    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
                    rq->ttwu_cnt, rq->ttwu_local,
                    rq->rq_sched_info.cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
                preempt_disable();
                for_each_domain(cpu, sd) {
                        enum idle_type itype;
                        char mask_str[NR_CPUS];

                        cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
                        seq_printf(seq, "domain%d %s", dcnt++, mask_str);
                        for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
                                        itype++) {
                                seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
                                    sd->lb_cnt[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
                            sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
                }
                preempt_enable();
#endif
        }
        return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
        unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
        char *buf = kmalloc(size, GFP_KERNEL);
        struct seq_file *m;
        int res;

        if (!buf)
                return -ENOMEM;
        res = single_open(file, show_schedstat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}

struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta_jiffies;
                rq->rq_sched_info.pcnt++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{
        if (rq)
                rq->rq_sched_info.cpu_time += delta_jiffies;
}
# define schedstat_inc(rq, field)       do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)  do { (rq)->field += (amt); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{}
# define schedstat_inc(rq, field)       do { } while (0)
# define schedstat_add(rq, field, amt)  do { } while (0)
#endif
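
/*
 * Note the pattern: with CONFIG_SCHEDSTATS disabled, schedstat_inc()
 * and schedstat_add() expand to empty statements, so call sites such as
 *
 *      schedstat_inc(rq, ttwu_cnt);
 *
 * (see try_to_wake_up() below) compile away entirely in non-stats builds.
 */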

/*
 * rq_lock - lock a given runqueue and disable interrupts.
 */
static inline struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        spin_lock(&rq->lock);

        return rq;
}

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * This function is only called from sched_info_arrive(), rather than
 * dequeue_task(). Even though a task may be queued and dequeued multiple
 * times as it is shuffled about, we're really interested in knowing how
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
        unsigned long now = jiffies, delta_jiffies = 0;

        if (t->sched_info.last_queued)
                delta_jiffies = now - t->sched_info.last_queued;
        sched_info_dequeued(t);
        t->sched_info.run_delay += delta_jiffies;
        t->sched_info.last_arrival = now;
        t->sched_info.pcnt++;

        rq_sched_info_arrive(task_rq(t), delta_jiffies);
}

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the
 * task had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either.  It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can
 * happen in sched_yield(), set_user_nice(), and even load_balance()
 * as it moves tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = jiffies;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 */
static inline void sched_info_depart(struct task_struct *t)
{
        unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;

        t->sched_info.cpu_time += delta_jiffies;
        rq_sched_info_depart(task_rq(t), delta_jiffies);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        struct rq *rq = task_rq(prev);

        /*
         * prev now departs the cpu.  It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(prev);

        if (next != rq->idle)
                sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t)            do { } while (0)
#define sched_info_switch(t, next)      do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task(struct task_struct *p, struct prio_array *array)
{
        array->nr_active--;
        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}

static void enqueue_task(struct task_struct *p, struct prio_array *array)
{
        sched_info_queued(p);
        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        array->nr_active++;
        p->array = array;
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task(struct task_struct *p, struct prio_array *array)
{
        list_move_tail(&p->run_list, array->queue + p->prio);
}

static inline void
enqueue_task_head(struct task_struct *p, struct prio_array *array)
{
        list_add(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        array->nr_active++;
        p->array = array;
}

/*
 * __normal_prio - return the priority that is based on the static
 * priority but is modified by bonuses/penalties.
 *
 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
 * into the -5 ... 0 ... +5 bonus/penalty range.
 *
 * We use 25% of the full 0...39 priority range so that:
 *
 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
 *
 * Both properties are important to certain workloads.
 */

static inline int __normal_prio(struct task_struct *p)
{
        int bonus, prio;

        bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;

        prio = p->static_prio - bonus;
        if (prio < MAX_RT_PRIO)
                prio = MAX_RT_PRIO;
        if (prio > MAX_PRIO-1)
                prio = MAX_PRIO-1;
        return prio;
}
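
/*
 * For illustration, with MAX_BONUS == 10 (an assumption, see above):
 * a task whose sleep_avg has saturated gets CURRENT_BONUS == 10, i.e.
 * bonus == +5, so at nice 0 it runs at dynamic priority 115; a task
 * that never sleeps gets bonus == -5 and runs at 125. The clamps keep
 * the result inside the SCHED_NORMAL range [MAX_RT_PRIO, MAX_PRIO-1].
 */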

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

/*
 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
 * If static_prio_timeslice() is ever changed to break this assumption then
 * this code will need modification
 */
#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
#define LOAD_WEIGHT(lp) \
        (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define PRIO_TO_LOAD_WEIGHT(prio) \
        LOAD_WEIGHT(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
        (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
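
/*
 * In effect a task's weight is proportional to the timeslice it would
 * be granted: a nice 0 task weighs exactly SCHED_LOAD_SCALE, a nice -20
 * task weighs eight times that (an 800ms vs 100ms slice under the
 * HZ == 1000 assumption above), and RTPRIO_TO_LOAD_WEIGHT() starts at
 * the heaviest SCHED_NORMAL weight and adds LOAD_WEIGHT(rp) on top, so
 * RT tasks always weigh at least as much as any normal task.
 */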

static void set_load_weight(struct task_struct *p)
{
        if (has_rt_policy(p)) {
#ifdef CONFIG_SMP
                if (p == task_rq(p)->migration_thread)
                        /*
                         * The migration thread does the actual balancing.
                         * Giving its load any weight will skew balancing
                         * adversely.
                         */
                        p->load_weight = 0;
                else
#endif
                        p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
        } else
                p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
}

static inline void
inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
{
        rq->raw_weighted_load += p->load_weight;
}

static inline void
dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
{
        rq->raw_weighted_load -= p->load_weight;
}

static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
{
        rq->nr_running++;
        inc_raw_weighted_load(rq, p);
}

static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
{
        rq->nr_running--;
        dec_raw_weighted_load(rq, p);
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
        int prio;

        if (has_rt_policy(p))
                prio = MAX_RT_PRIO-1 - p->rt_priority;
        else
                prio = __normal_prio(p);
        return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
        p->normal_prio = normal_prio(p);
        /*
         * If we are RT tasks or we were boosted to RT priority,
         * keep the priority unchanged. Otherwise, update priority
         * to the normal priority:
         */
        if (!rt_prio(p->prio))
                return p->normal_prio;
        return p->prio;
}

/*
 * __activate_task - move a task to the runqueue.
 */
static void __activate_task(struct task_struct *p, struct rq *rq)
{
        struct prio_array *target = rq->active;

        if (batch_task(p))
                target = rq->expired;
        enqueue_task(p, target);
        inc_nr_running(p, rq);
}

/*
 * __activate_idle_task - move idle task to the _front_ of runqueue.
 */
static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
{
        enqueue_task_head(p, rq->active);
        inc_nr_running(p, rq);
}

/*
 * Recalculate p->normal_prio and p->prio after having slept,
 * updating the sleep-average too:
 */
static int recalc_task_prio(struct task_struct *p, unsigned long long now)
{
        /* Caller must always ensure 'now >= p->timestamp' */
        unsigned long sleep_time = now - p->timestamp;

        if (batch_task(p))
                sleep_time = 0;

        if (likely(sleep_time > 0)) {
                /*
                 * This ceiling is set to the lowest priority that would allow
                 * a task to be reinserted into the active array on timeslice
                 * completion.
                 */
                unsigned long ceiling = INTERACTIVE_SLEEP(p);

                if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
                        /*
                         * Prevents user tasks from achieving best priority
                         * with one single large enough sleep.
                         */
                        p->sleep_avg = ceiling;
                        /*
                         * Using INTERACTIVE_SLEEP() as a ceiling places a
                         * nice(0) task 1ms sleep away from promotion, and
                         * gives it 700ms to round-robin with no chance of
                         * being demoted.  This is more than generous, so
                         * mark this sleep as non-interactive to prevent the
                         * on-runqueue bonus logic from intervening should
                         * this task not receive cpu immediately.
                         */
                        p->sleep_type = SLEEP_NONINTERACTIVE;
                } else {
                        /*
                         * Tasks waking from uninterruptible sleep are
                         * limited in their sleep_avg rise as they
                         * are likely to be waiting on I/O
                         */
                        if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
                                if (p->sleep_avg >= ceiling)
                                        sleep_time = 0;
                                else if (p->sleep_avg + sleep_time >=
                                         ceiling) {
                                        p->sleep_avg = ceiling;
                                        sleep_time = 0;
                                }
                        }

                        /*
                         * This code gives a bonus to interactive tasks.
                         *
                         * The boost works by updating the 'average sleep time'
                         * value here, based on ->timestamp. The more time a
                         * task spends sleeping, the higher the average gets -
                         * and the higher the priority boost gets as well.
                         */
                        p->sleep_avg += sleep_time;
                }
                if (p->sleep_avg > NS_MAX_SLEEP_AVG)
                        p->sleep_avg = NS_MAX_SLEEP_AVG;
        }

        return effective_prio(p);
}

/*
 * activate_task - move a task to the runqueue and do priority recalculation
 *
 * Update all the scheduling statistics stuff. (sleep average
 * calculation, priority modifiers, etc.)
 */
static void activate_task(struct task_struct *p, struct rq *rq, int local)
{
        unsigned long long now;

        now = sched_clock();
#ifdef CONFIG_SMP
        if (!local) {
                /* Compensate for drifting sched_clock */
                struct rq *this_rq = this_rq();
                now = (now - this_rq->timestamp_last_tick)
                        + rq->timestamp_last_tick;
        }
#endif

        if (!rt_task(p))
                p->prio = recalc_task_prio(p, now);

        /*
         * This checks to make sure it's not an uninterruptible task
         * that is now waking up.
         */
        if (p->sleep_type == SLEEP_NORMAL) {
                /*
                 * Tasks which were woken up by interrupts (i.e. hw events)
                 * are most likely of interactive nature. So we give them
                 * the credit of extending their sleep time to the period
                 * of time they spend on the runqueue, waiting for execution
                 * on a CPU, first time around:
                 */
                if (in_interrupt())
                        p->sleep_type = SLEEP_INTERRUPTED;
                else {
                        /*
                         * Normal first-time wakeups get a credit too for
                         * on-runqueue time, but it will be weighted down:
                         */
                        p->sleep_type = SLEEP_INTERACTIVE;
                }
        }
        p->timestamp = now;

        __activate_task(p, rq);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
static void deactivate_task(struct task_struct *p, struct rq *rq)
{
        dec_nr_running(p, rq);
        dequeue_task(p, p->array);
        p->array = NULL;
}

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_spin_locked(&task_rq(p)->lock);

        if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
                return;

        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}
#else
static inline void resched_task(struct task_struct *p)
{
        assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}
#endif

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
{
        return cpu_rq(cpu)->raw_weighted_load;
}

#ifdef CONFIG_SMP
struct migration_req {
        struct list_head list;

        struct task_struct *task;
        int dest_cpu;

        struct completion done;
};

/*
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
 */
static int
migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
{
        struct rq *rq = task_rq(p);

        /*
         * If the task is not on a runqueue (and not running), then
         * it is sufficient to simply update the task's cpu field.
         */
        if (!p->array && !task_running(rq, p)) {
                set_task_cpu(p, dest_cpu);
                return 0;
        }

        init_completion(&req->done);
        req->task = p;
        req->dest_cpu = dest_cpu;
        list_add(&req->list, &rq->migration_queue);

        return 1;
}
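
/*
 * A sketch of the caller's side of this protocol ('req' can live on
 * the caller's stack because the caller sleeps until 'done' fires;
 * compare set_cpus_allowed() further down in the full file):
 *
 *      struct migration_req req;
 *      ...
 *      if (migrate_task(p, dest_cpu, &req)) {
 *              task_rq_unlock(rq, &flags);
 *              wake_up_process(rq->migration_thread);
 *              wait_for_completion(&req.done);
 *      }
 */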

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
void wait_task_inactive(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;
        int preempted;

repeat:
        rq = task_rq_lock(p, &flags);
        /* Must be off runqueue entirely, not preempted. */
        if (unlikely(p->array || task_running(rq, p))) {
                /* If it's preempted, we yield.  It could be a while. */
                preempted = !task_running(rq, p);
                task_rq_unlock(rq, &flags);
                cpu_relax();
                if (preempted)
                        yield();
                goto repeat;
        }
        task_rq_unlock(rq, &flags);
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
        int cpu;

        preempt_disable();
        cpu = task_cpu(p);
        if ((cpu != smp_processor_id()) && task_curr(p))
                smp_send_reschedule(cpu);
        preempt_enable();
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static inline unsigned long source_load(int cpu, int type)
{
        struct rq *rq = cpu_rq(cpu);

        if (type == 0)
                return rq->raw_weighted_load;

        return min(rq->cpu_load[type-1], rq->raw_weighted_load);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static inline unsigned long target_load(int cpu, int type)
{
        struct rq *rq = cpu_rq(cpu);

        if (type == 0)
                return rq->raw_weighted_load;

        return max(rq->cpu_load[type-1], rq->raw_weighted_load);
}

/*
 * Return the average load per task on the cpu's run queue
 */
static inline unsigned long cpu_avg_load_per_task(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long n = rq->nr_running;

        return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
}

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
        struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;

        do {
                unsigned long load, avg_load;
                int local_group;
                int i;

                /* Skip over this group if it has no CPUs allowed */
                if (!cpus_intersects(group->cpumask, p->cpus_allowed))
                        goto nextgroup;

                local_group = cpu_isset(this_cpu, group->cpumask);

                /* Tally up the load of all CPUs in the group */
                avg_load = 0;

                for_each_cpu_mask(i, group->cpumask) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
                        else
                                load = target_load(i, load_idx);

                        avg_load += load;
                }

                /* Adjust by relative CPU power of the group */
                avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

                if (local_group) {
                        this_load = avg_load;
                        this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
                }
nextgroup:
                group = group->next;
        } while (group != sd->groups);

        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
        return idlest;
}
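
/*
 * Note the imbalance test: with an assumed sd->imbalance_pct of 125,
 * imbalance == 112, so NULL ("stay local") is returned unless
 * 100 * this_load >= 112 * min_load, i.e. unless the local group
 * carries at least 12% more load than the best remote group.
 */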

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
        cpumask_t tmp;
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;

        /* Traverse only the allowed CPUs */
        cpus_and(tmp, group->cpumask, p->cpus_allowed);

        for_each_cpu_mask(i, tmp) {
                load = weighted_cpuload(i);

                if (load < min_load || (load == min_load && i == this_cpu)) {
                        min_load = load;
                        idlest = i;
                }
        }

        return idlest;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, i.e. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int sched_balance_self(int cpu, int flag)
{
        struct task_struct *t = current;
        struct sched_domain *tmp, *sd = NULL;

        for_each_domain(cpu, tmp) {
                /*
                 * If power savings logic is enabled for a domain, stop there.
                 */
                if (tmp->flags & SD_POWERSAVINGS_BALANCE)
                        break;
                if (tmp->flags & flag)
                        sd = tmp;
        }

        while (sd) {
                cpumask_t span;
                struct sched_group *group;
                int new_cpu, weight;

                if (!(sd->flags & flag)) {
                        sd = sd->child;
                        continue;
                }

                span = sd->span;
                group = find_idlest_group(sd, t, cpu);
                if (!group) {
                        sd = sd->child;
                        continue;
                }

                new_cpu = find_idlest_cpu(group, t, cpu);
                if (new_cpu == -1 || new_cpu == cpu) {
                        /* Now try balancing at a lower domain level of cpu */
                        sd = sd->child;
                        continue;
                }

                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
                sd = NULL;
                weight = cpus_weight(span);
                for_each_domain(cpu, tmp) {
                        if (weight <= cpus_weight(tmp->span))
                                break;
                        if (tmp->flags & flag)
                                sd = tmp;
                }
                /* while loop will break here if sd == NULL */
        }

        return cpu;
}

#endif /* CONFIG_SMP */

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available.  The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
        cpumask_t tmp;
        struct sched_domain *sd;
        int i;

        if (idle_cpu(cpu))
                return cpu;

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_IDLE) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
                        for_each_cpu_mask(i, tmp) {
                                if (idle_cpu(i))
                                        return i;
                        }
                } else
                        break;
        }
        return cpu;
}
#else
static inline int wake_idle(int cpu, struct task_struct *p)
{
        return cpu;
}
#endif

/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
        int cpu, this_cpu, success = 0;
        unsigned long flags;
        long old_state;
        struct rq *rq;
#ifdef CONFIG_SMP
        struct sched_domain *sd, *this_sd = NULL;
        unsigned long load, this_load;
        int new_cpu;
#endif

        rq = task_rq_lock(p, &flags);
        old_state = p->state;
        if (!(old_state & state))
                goto out;

        if (p->array)
                goto out_running;

        cpu = task_cpu(p);
        this_cpu = smp_processor_id();

#ifdef CONFIG_SMP
        if (unlikely(task_running(rq, p)))
                goto out_activate;

        new_cpu = cpu;

        schedstat_inc(rq, ttwu_cnt);
        if (cpu == this_cpu) {
                schedstat_inc(rq, ttwu_local);
                goto out_set_cpu;
        }

        for_each_domain(this_cpu, sd) {
                if (cpu_isset(cpu, sd->span)) {
                        schedstat_inc(sd, ttwu_wake_remote);
                        this_sd = sd;
                        break;
                }
        }

        if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
                goto out_set_cpu;

        /*
         * Check for affine wakeup and passive balancing possibilities.
         */
        if (this_sd) {
                int idx = this_sd->wake_idx;
                unsigned int imbalance;

                imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

                load = source_load(cpu, idx);
                this_load = target_load(this_cpu, idx);

                new_cpu = this_cpu; /* Wake to this CPU if we can */

                if (this_sd->flags & SD_WAKE_AFFINE) {
                        unsigned long tl = this_load;
                        unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);

                        /*
                         * If sync wakeup then subtract the (maximum possible)
                         * effect of the currently running task from the load
                         * of the current CPU:
                         */
                        if (sync)
                                tl -= current->load_weight;

                        if ((tl <= load &&
                                tl + target_load(cpu, idx) <= tl_per_task) ||
                                100*(tl + p->load_weight) <= imbalance*load) {
                                /*
                                 * This domain has SD_WAKE_AFFINE and
                                 * p is cache cold in this domain, and
                                 * there is no bad imbalance.
                                 */
                                schedstat_inc(this_sd, ttwu_move_affine);
                                goto out_set_cpu;
                        }
                }

                /*
                 * Start passive balancing when half the imbalance_pct
                 * limit is reached.
                 */
                if (this_sd->flags & SD_WAKE_BALANCE) {
                        if (imbalance*this_load <= 100*load) {
                                schedstat_inc(this_sd, ttwu_move_balance);
                                goto out_set_cpu;
                        }
                }
        }

        new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
        new_cpu = wake_idle(new_cpu, p);
        if (new_cpu != cpu) {
                set_task_cpu(p, new_cpu);
                task_rq_unlock(rq, &flags);
                /* might preempt at this point */
                rq = task_rq_lock(p, &flags);
                old_state = p->state;
                if (!(old_state & state))
                        goto out;
                if (p->array)
                        goto out_running;

                this_cpu = smp_processor_id();
                cpu = task_cpu(p);
        }

out_activate:
#endif /* CONFIG_SMP */
        if (old_state == TASK_UNINTERRUPTIBLE) {
                rq->nr_uninterruptible--;
                /*
                 * Tasks on involuntary sleep don't earn
                 * sleep_avg beyond just interactive state.
                 */
                p->sleep_type = SLEEP_NONINTERACTIVE;
        } else if (old_state & TASK_NONINTERACTIVE) {
                /*
                 * Tasks that have marked their sleep as noninteractive get
                 * woken up with their sleep average not weighted in an
                 * interactive way.
                 */
                p->sleep_type = SLEEP_NONINTERACTIVE;
        }

        activate_task(p, rq, cpu == this_cpu);
        /*
         * Sync wakeups (i.e. those types of wakeups where the waker
         * has indicated that it will leave the CPU in short order)
         * don't trigger a preemption, if the woken up task will run on
         * this cpu. (in this case the 'I will reschedule' promise of
         * the waker guarantees that the freshly woken up task is going
         * to be considered on this CPU.)
         */
        if (!sync || cpu != this_cpu) {
                if (TASK_PREEMPTS_CURR(p, rq))
                        resched_task(rq->curr);
        }
        success = 1;

out_running:
        p->state = TASK_RUNNING;
out:
        task_rq_unlock(rq, &flags);

        return success;
}

int fastcall wake_up_process(struct task_struct *p)
{
        return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
                                 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wake_up_process);
1544
1545int fastcall wake_up_state(struct task_struct *p, unsigned int state)
1546{
1547        return try_to_wake_up(p, state, 0);
1548}
1549
1550/*
1551 * Perform scheduler related setup for a newly forked process p.
1552 * p is forked by current.
1553 */
1554void fastcall sched_fork(struct task_struct *p, int clone_flags)
1555{
1556        int cpu = get_cpu();
1557
1558#ifdef CONFIG_SMP
1559        cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
1560#endif
1561        set_task_cpu(p, cpu);
1562
1563        /*
1564         * We mark the process as running here, but have not actually
1565         * inserted it onto the runqueue yet. This guarantees that
1566         * nobody will actually run it, and a signal or other external
1567         * event cannot wake it up and insert it on the runqueue either.
1568         */
1569        p->state = TASK_RUNNING;
1570
1571        /*
1572         * Make sure we do not leak PI boosting priority to the child:
1573         */
1574        p->prio = current->normal_prio;
1575
1576        INIT_LIST_HEAD(&p->run_list);
1577        p->array = NULL;
1578#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1579        if (unlikely(sched_info_on()))
1580                memset(&p->sched_info, 0, sizeof(p->sched_info));
1581#endif
1582#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
1583        p->oncpu = 0;
1584#endif
1585#ifdef CONFIG_PREEMPT
1586        /* Want to start with kernel preemption disabled. */
1587        task_thread_info(p)->preempt_count = 1;
1588#endif
1589        /*
1590         * Share the timeslice between parent and child, thus the
1591         * total amount of pending timeslices in the system doesn't change,
1592         * resulting in more scheduling fairness.
1593         */
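        /*
         * A worked example of the split below (with HZ=1000, so
         * DEF_TIMESLICE is 100 jiffies): a parent with 7 jiffies left
         * gives the child (7 + 1) >> 1 = 4 and keeps 7 >> 1 = 3 itself,
         * so the pending 7 jiffies are split rather than duplicated.
         */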
1594        local_irq_disable();
1595        p->time_slice = (current->time_slice + 1) >> 1;
1596        /*
1597         * The remainder of the first timeslice might be recovered by
1598         * the parent if the child exits early enough.
1599         */
1600        p->first_time_slice = 1;
1601        current->time_slice >>= 1;
1602        p->timestamp = sched_clock();
1603        if (unlikely(!current->time_slice)) {
1604                /*
1605                 * This case is rare, it happens when the parent has only
1606                 * a single jiffy left from its timeslice. Taking the
1607                 * runqueue lock is not a problem.
1608                 */
1609                current->time_slice = 1;
1610                scheduler_tick();
1611        }
1612        local_irq_enable();
1613        put_cpu();
1614}
1615
1616/*
1617 * wake_up_new_task - wake up a newly created task for the first time.
1618 *
1619 * This function will do some initial scheduler statistics housekeeping
1620 * that must be done for every newly created context, then puts the task
1621 * on the runqueue and wakes it.
1622 */
1623void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1624{
1625        struct rq *rq, *this_rq;
1626        unsigned long flags;
1627        int this_cpu, cpu;
1628
1629        rq = task_rq_lock(p, &flags);
1630        BUG_ON(p->state != TASK_RUNNING);
1631        this_cpu = smp_processor_id();
1632        cpu = task_cpu(p);
1633
1634        /*
1635         * We decrease the sleep average of forking parents
1636         * and children as well, to keep max-interactive tasks
1637         * from forking tasks that are max-interactive. The parent
1638         * (current) is done further down, under its lock.
1639         */
1640        p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
1641                CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
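        /*
         * Worked example, assuming the default CHILD_PENALTY of 95 and
         * MAX_BONUS of 10: a maximally interactive parent (CURRENT_BONUS
         * of 10) yields 10 * 95 / 100 = 9 in integer arithmetic, so the
         * child starts at 9 * MAX_SLEEP_AVG / 10, i.e. 90% of the
         * maximum sleep_avg rather than inheriting the full amount.
         */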
1642
1643        p->prio = effective_prio(p);
1644
1645        if (likely(cpu == this_cpu)) {
1646                if (!(clone_flags & CLONE_VM)) {
1647                        /*
1648                         * The VM isn't cloned, so we're in a good position to
1649                         * do child-runs-first in anticipation of an exec. This
1650                         * usually avoids a lot of COW overhead.
1651                         */
1652                        if (unlikely(!current->array))
1653                                __activate_task(p, rq);
1654                        else {
1655                                p->prio = current->prio;
1656                                p->normal_prio = current->normal_prio;
1657                                list_add_tail(&p->run_list, &current->run_list);
1658                                p->array = current->array;
1659                                p->array->nr_active++;
1660                                inc_nr_running(p, rq);
1661                        }
1662                        set_need_resched();
1663                } else
1664                        /* Run child last */
1665                        __activate_task(p, rq);
1666                /*
1667                 * We skip the following code due to cpu == this_cpu
1668                 *
1669                 *   task_rq_unlock(rq, &flags);
1670                 *   this_rq = task_rq_lock(current, &flags);
1671                 */
1672                this_rq = rq;
1673        } else {
1674                this_rq = cpu_rq(this_cpu);
1675
1676                /*
1677                 * Not the local CPU - must adjust timestamp. This should
1678                 * get optimised away in the !CONFIG_SMP case.
1679                 */
1680                p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
1681                                        + rq->timestamp_last_tick;
1682                __activate_task(p, rq);
1683                if (TASK_PREEMPTS_CURR(p, rq))
1684                        resched_task(rq->curr);
1685
1686                /*
1687                 * Parent and child are on different CPUs, now get the
1688                 * parent runqueue to update the parent's ->sleep_avg:
1689                 */
1690                task_rq_unlock(rq, &flags);
1691                this_rq = task_rq_lock(current, &flags);
1692        }
1693        current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
1694                PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
1695        task_rq_unlock(this_rq, &flags);
1696}
1697
1698/*
1699 * Potentially available exiting-child timeslices are
1700 * retrieved here - this way the parent does not get
1701 * penalized for creating too many threads.
1702 *
1703 * (this cannot be used to 'generate' timeslices
1704 * artificially, because any timeslice recovered here
1705 * was given away by the parent in the first place.)
1706 */
1707void fastcall sched_exit(struct task_struct *p)
1708{
1709        unsigned long flags;
1710        struct rq *rq;
1711
1712        /*
1713         * If the child was a (relative-) CPU hog then decrease
1714         * the sleep_avg of the parent as well.
1715         */
1716        rq = task_rq_lock(p->parent, &flags);
1717        if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
1718                p->parent->time_slice += p->time_slice;
1719                if (unlikely(p->parent->time_slice > task_timeslice(p)))
1720                        p->parent->time_slice = task_timeslice(p);
1721        }
1722        if (p->sleep_avg < p->parent->sleep_avg)
1723                p->parent->sleep_avg = p->parent->sleep_avg /
1724                (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
1725                (EXIT_WEIGHT + 1);
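        /*
         * With the default EXIT_WEIGHT of 3, the statement above blends
         * the averages as 3/4 of the parent's old sleep_avg plus 1/4 of
         * the exiting child's, nudging the parent's interactivity down
         * without letting a single CPU-hog child dominate it.
         */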
1726        task_rq_unlock(rq, &flags);
1727}
1728
1729/**
1730 * prepare_task_switch - prepare to switch tasks
1731 * @rq: the runqueue preparing to switch
1732 * @next: the task we are going to switch to.
1733 *
1734 * This is called with the rq lock held and interrupts off. It must
1735 * be paired with a subsequent finish_task_switch after the context
1736 * switch.
1737 *
1738 * prepare_task_switch sets up locking and calls architecture specific
1739 * hooks.
1740 */
1741static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
1742{
1743        prepare_lock_switch(rq, next);
1744        prepare_arch_switch(next);
1745}
1746
1747/**
1748 * finish_task_switch - clean up after a task-switch
1749 * @rq: runqueue associated with task-switch
1750 * @prev: the thread we just switched away from.
1751 *
1752 * finish_task_switch must be called after the context switch, paired
1753 * with a prepare_task_switch call before the context switch.
1754 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1755 * and do any other architecture-specific cleanup actions.
1756 *
1757 * Note that we may have delayed dropping an mm in context_switch(). If
1758 * so, we finish that here outside of the runqueue lock.  (Doing it
1759 * with the lock held can cause deadlocks; see schedule() for
1760 * details.)
1761 */
1762static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1763        __releases(rq->lock)
1764{
1765        struct mm_struct *mm = rq->prev_mm;
1766        long prev_state;
1767
1768        rq->prev_mm = NULL;
1769
1770        /*
1771         * A task struct has one reference for the use as "current".
1772         * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1773         * schedule one last time. The schedule call will never return, and
1774         * the scheduled task must drop that reference.
1775         * The test for TASK_DEAD must occur while the runqueue locks are
1776         * still held, otherwise prev could be scheduled on another cpu, die
1777         * there before we look at prev->state, and then the reference would
1778         * be dropped twice.
1779         *              Manfred Spraul <manfred@colorfullife.com>
1780         */
1781        prev_state = prev->state;
1782        finish_arch_switch(prev);
1783        finish_lock_switch(rq, prev);
1784        if (mm)
1785                mmdrop(mm);
1786        if (unlikely(prev_state == TASK_DEAD)) {
1787                /*
1788                 * Remove function-return probe instances associated with this
1789                 * task and put them back on the free list.
1790                 */
1791                kprobe_flush_task(prev);
1792                put_task_struct(prev);
1793        }
1794}
1795
1796/**
1797 * schedule_tail - first thing a freshly forked thread must call.
1798 * @prev: the thread we just switched away from.
1799 */
1800asmlinkage void schedule_tail(struct task_struct *prev)
1801        __releases(rq->lock)
1802{
1803        struct rq *rq = this_rq();
1804
1805        finish_task_switch(rq, prev);
1806#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1807        /* In this case, finish_task_switch does not reenable preemption */
1808        preempt_enable();
1809#endif
1810        if (current->set_child_tid)
1811                put_user(current->pid, current->set_child_tid);
1812}
1813
1814/*
1815 * context_switch - switch to the new MM and the new
1816 * thread's register state.
1817 */
1818static inline struct task_struct *
1819context_switch(struct rq *rq, struct task_struct *prev,
1820               struct task_struct *next)
1821{
1822        struct mm_struct *mm = next->mm;
1823        struct mm_struct *oldmm = prev->active_mm;
1824
1825        if (!mm) {
1826                next->active_mm = oldmm;
1827                atomic_inc(&oldmm->mm_count);
1828                enter_lazy_tlb(oldmm, next);
1829        } else
1830                switch_mm(oldmm, mm, next);
1831
1832        if (!prev->mm) {
1833                prev->active_mm = NULL;
1834                WARN_ON(rq->prev_mm);
1835                rq->prev_mm = oldmm;
1836        }
1837        /*
1838         * The runqueue lock will be released by the next
1839         * task (which is an invalid locking op but in the case
1840         * of the scheduler it's an obvious special-case), so we
1841         * do an early lockdep release here:
1842         */
1843#ifndef __ARCH_WANT_UNLOCKED_CTXSW
1844        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
1845#endif
1846
1847        /* Here we just switch the register state and the stack. */
1848        switch_to(prev, next, prev);
1849
1850        return prev;
1851}
1852
1853/*
1854 * nr_running, nr_uninterruptible and nr_context_switches:
1855 *
1856 * externally visible scheduler statistics: current number of runnable
1857 * threads, current number of uninterruptible-sleeping threads, total
1858 * number of context switches performed since bootup.
1859 */
1860unsigned long nr_running(void)
1861{
1862        unsigned long i, sum = 0;
1863
1864        for_each_online_cpu(i)
1865                sum += cpu_rq(i)->nr_running;
1866
1867        return sum;
1868}
1869
1870unsigned long nr_uninterruptible(void)
1871{
1872        unsigned long i, sum = 0;
1873
1874        for_each_possible_cpu(i)
1875                sum += cpu_rq(i)->nr_uninterruptible;
1876
1877        /*
1878         * Since we read the counters locklessly, the sum might be slightly
1879         * inaccurate. Do not allow it to go below zero though:
1880         */
1881        if (unlikely((long)sum < 0))
1882                sum = 0;
1883
1884        return sum;
1885}
1886
1887unsigned long long nr_context_switches(void)
1888{
1889        int i;
1890        unsigned long long sum = 0;
1891
1892        for_each_possible_cpu(i)
1893                sum += cpu_rq(i)->nr_switches;
1894
1895        return sum;
1896}
1897
1898unsigned long nr_iowait(void)
1899{
1900        unsigned long i, sum = 0;
1901
1902        for_each_possible_cpu(i)
1903                sum += atomic_read(&cpu_rq(i)->nr_iowait);
1904
1905        return sum;
1906}
1907
1908unsigned long nr_active(void)
1909{
1910        unsigned long i, running = 0, uninterruptible = 0;
1911
1912        for_each_online_cpu(i) {
1913                running += cpu_rq(i)->nr_running;
1914                uninterruptible += cpu_rq(i)->nr_uninterruptible;
1915        }
1916
1917        if (unlikely((long)uninterruptible < 0))
1918                uninterruptible = 0;
1919
1920        return running + uninterruptible;
1921}
1922
1923#ifdef CONFIG_SMP
1924
1925/*
1926 * Is this task likely cache-hot:
1927 */
1928static inline int
1929task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
1930{
1931        return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
1932}
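/*
 * The signed comparison above is deliberate: sched_clock() values from
 * different CPUs can be slightly out of sync, so 'now' may lag behind
 * p->last_ran. An unsigned subtraction would wrap to a huge value and
 * make such a task look cache-cold; the (long long) cast keeps the
 * difference negative, so the task still counts as cache-hot.
 */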
1933
1934/*
1935 * double_rq_lock - safely lock two runqueues
1936 *
1937 * Note this does not disable interrupts like task_rq_lock,
1938 * you need to do so manually before calling.
1939 */
1940static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1941        __acquires(rq1->lock)
1942        __acquires(rq2->lock)
1943{
1944        if (rq1 == rq2) {
1945                spin_lock(&rq1->lock);
1946                __acquire(rq2->lock);   /* Fake it out ;) */
1947        } else {
1948                if (rq1 < rq2) {
1949                        spin_lock(&rq1->lock);
1950                        spin_lock(&rq2->lock);
1951                } else {
1952                        spin_lock(&rq2->lock);
1953                        spin_lock(&rq1->lock);
1954                }
1955        }
1956}
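/*
 * The address ordering above is what makes double_rq_lock() deadlock
 * free: if one CPU locked rq1 then rq2 while another locked rq2 then
 * rq1, each could block on the lock the other holds (an AB-BA
 * deadlock). Always taking the lower-addressed runqueue lock first
 * rules that interleaving out.
 */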
1957
1958/*
1959 * double_rq_unlock - safely unlock two runqueues
1960 *
1961 * Note this does not restore interrupts like task_rq_unlock,
1962 * you need to do so manually after calling.
1963 */
1964static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1965        __releases(rq1->lock)
1966        __releases(rq2->lock)
1967{
1968        spin_unlock(&rq1->lock);
1969        if (rq1 != rq2)
1970                spin_unlock(&rq2->lock);
1971        else
1972                __release(rq2->lock);
1973}
1974
1975/*
1976 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1977 */
1978static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
1979        __releases(this_rq->lock)
1980        __acquires(busiest->lock)
1981        __acquires(this_rq->lock)
1982{
1983        if (unlikely(!spin_trylock(&busiest->lock))) {
1984                if (busiest < this_rq) {
1985                        spin_unlock(&this_rq->lock);
1986                        spin_lock(&busiest->lock);
1987                        spin_lock(&this_rq->lock);
1988                } else
1989                        spin_lock(&busiest->lock);
1990        }
1991}
1992
1993/*
1994 * If dest_cpu is allowed for this process, migrate the task to it.
1995 * This is accomplished via migrate_task(), which either moves the
1996 * task directly (if it is not running or queued) or hands it to the
1997 * source runqueue's migration thread, whose completion we then wait for.
1998 */
1999static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2000{
2001        struct migration_req req;
2002        unsigned long flags;
2003        struct rq *rq;
2004
2005        rq = task_rq_lock(p, &flags);
2006        if (!cpu_isset(dest_cpu, p->cpus_allowed)
2007            || unlikely(cpu_is_offline(dest_cpu)))
2008                goto out;
2009
2010        /* force the process onto the specified CPU */
2011        if (migrate_task(p, dest_cpu, &req)) {
2012                /* Need to wait for migration thread (might exit: take ref). */
2013                struct task_struct *mt = rq->migration_thread;
2014
2015                get_task_struct(mt);
2016                task_rq_unlock(rq, &flags);
2017                wake_up_process(mt);
2018                put_task_struct(mt);
2019                wait_for_completion(&req.done);
2020
2021                return;
2022        }
2023out:
2024        task_rq_unlock(rq, &flags);
2025}
2026
2027/*
2028 * sched_exec - execve() is a valuable balancing opportunity, because at
2029 * this point the task has the smallest effective memory and cache footprint.
2030 */
2031void sched_exec(void)
2032{
2033        int new_cpu, this_cpu = get_cpu();
2034        new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
2035        put_cpu();
2036        if (new_cpu != this_cpu)
2037                sched_migrate_task(current, new_cpu);
2038}
2039
2040/*
2041 * pull_task - move a task from a remote runqueue to the local runqueue.
2042 * Both runqueues must be locked.
2043 */
2044static void pull_task(struct rq *src_rq, struct prio_array *src_array,
2045                      struct task_struct *p, struct rq *this_rq,
2046                      struct prio_array *this_array, int this_cpu)
2047{
2048        dequeue_task(p, src_array);
2049        dec_nr_running(p, src_rq);
2050        set_task_cpu(p, this_cpu);
2051        inc_nr_running(p, this_rq);
2052        enqueue_task(p, this_array);
2053        p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
2054                                + this_rq->timestamp_last_tick;
2055        /*
2056         * Note that idle threads have a prio of MAX_PRIO, so this test
2057         * will always be true for them.
2058         */
2059        if (TASK_PREEMPTS_CURR(p, this_rq))
2060                resched_task(this_rq->curr);
2061}
2062
2063/*
2064 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2065 */
2066static
2067int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2068                     struct sched_domain *sd, enum idle_type idle,
2069                     int *all_pinned)
2070{
2071        /*
2072         * We do not migrate tasks that:
2073         * 1) are running (obviously), or
2074         * 2) cannot be migrated to this CPU due to cpus_allowed, or
2075         * 3) are cache-hot on their current CPU.
2076         */
2077        if (!cpu_isset(this_cpu, p->cpus_allowed))
2078                return 0;
2079        *all_pinned = 0;
2080
2081        if (task_running(rq, p))
2082                return 0;
2083
2084        /*
2085         * Aggressive migration if:
2086         * 1) task is cache cold, or
2087         * 2) too many balance attempts have failed.
2088         */
2089
2090        if (sd->nr_balance_failed > sd->cache_nice_tries)
2091                return 1;
2092
2093        if (task_hot(p, rq->timestamp_last_tick, sd))
2094                return 0;
2095        return 1;
2096}
2097
2098#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
2099
2100/*
2101 * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
2102 * load from busiest to this_rq, as part of a balancing operation within
2103 * "domain". Returns the number of tasks moved.
2104 *
2105 * Called with both runqueues locked.
2106 */
2107static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2108                      unsigned long max_nr_move, unsigned long max_load_move,
2109                      struct sched_domain *sd, enum idle_type idle,
2110                      int *all_pinned)
2111{
2112        int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
2113            best_prio_seen, skip_for_load;
2114        struct prio_array *array, *dst_array;
2115        struct list_head *head, *curr;
2116        struct task_struct *tmp;
2117        long rem_load_move;
2118
2119        if (max_nr_move == 0 || max_load_move == 0)
2120                goto out;
2121
2122        rem_load_move = max_load_move;
2123        pinned = 1;
2124        this_best_prio = rq_best_prio(this_rq);
2125        best_prio = rq_best_prio(busiest);
2126        /*
2127         * Enable handling of the case where there is more than one task
2128         * with the best priority. If the currently running task is one
2129         * of those with prio==best_prio we know it won't be moved
2130         * and therefore it's safe to override the skip (based on load) of
2131         * any task we find with that prio.
2132         */
2133        best_prio_seen = best_prio == busiest->curr->prio;
2134
2135        /*
2136         * We first consider expired tasks. Those will likely not be
2137         * executed in the near future, and they are most likely to
2138         * be cache-cold, thus switching CPUs has the least effect
2139         * on them.
2140         */
2141        if (busiest->expired->nr_active) {
2142                array = busiest->expired;
2143                dst_array = this_rq->expired;
2144        } else {
2145                array = busiest->active;
2146                dst_array = this_rq->active;
2147        }
2148
2149new_array:
2150        /* Start searching at priority 0: */
2151        idx = 0;
2152skip_bitmap:
2153        if (!idx)
2154                idx = sched_find_first_bit(array->bitmap);
2155        else
2156                idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
2157        if (idx >= MAX_PRIO) {
2158                if (array == busiest->expired && busiest->active->nr_active) {
2159                        array = busiest->active;
2160                        dst_array = this_rq->active;
2161                        goto new_array;
2162                }
2163                goto out;
2164        }
2165
2166        head = array->queue + idx;
2167        curr = head->prev;
2168skip_queue:
2169        tmp = list_entry(curr, struct task_struct, run_list);
2170
2171        curr = curr->prev;
2172
2173        /*
2174         * To help distribute high priority tasks across CPUs we don't
2175         * skip a task if it will be the highest priority task (i.e. smallest
2176         * prio value) on its new queue, regardless of its load weight.
2177         */
2178        skip_for_load = tmp->load_weight > rem_load_move;
2179        if (skip_for_load && idx < this_best_prio)
2180                skip_for_load = !best_prio_seen && idx == best_prio;
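        /*
         * Put differently: a task heavier than the remaining load budget
         * is normally skipped, but not if it would become the highest
         * priority task on this_rq - unless it sits at busiest's best
         * prio and best_prio_seen has not yet confirmed that another
         * task of that prio will stay behind.
         */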
2181        if (skip_for_load ||
2182            !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
2183
2184                best_prio_seen |= idx == best_prio;
2185                if (curr != head)
2186                        goto skip_queue;
2187                idx++;
2188                goto skip_bitmap;
2189        }
2190
2191#ifdef CONFIG_SCHEDSTATS
2192        if (task_hot(tmp, busiest->timestamp_last_tick, sd))
2193                schedstat_inc(sd, lb_hot_gained[idle]);
2194#endif
2195
2196        pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
2197        pulled++;
2198        rem_load_move -= tmp->load_weight;
2199
2200        /*
2201         * We only want to steal up to the prescribed number of tasks
2202         * and the prescribed amount of weighted load.
2203         */
2204        if (pulled < max_nr_move && rem_load_move > 0) {
2205                if (idx < this_best_prio)
2206                        this_best_prio = idx;
2207                if (curr != head)
2208                        goto skip_queue;
2209                idx++;
2210                goto skip_bitmap;
2211        }
2212out:
2213        /*
2214         * Right now, this is the only place pull_task() is called,
2215         * so we can safely collect pull_task() stats here rather than
2216         * inside pull_task().
2217         */
2218        schedstat_add(sd, lb_gained[idle], pulled);
2219
2220        if (all_pinned)
2221                *all_pinned = pinned;
2222        return pulled;
2223}
2224
2225/*
2226 * find_busiest_group finds and returns the busiest CPU group within the
2227 * domain. It calculates and returns the amount of weighted load which
2228 * should be moved to restore balance via the imbalance parameter.
2229 */
2230static struct sched_group *
2231find_busiest_group(struct sched_domain *sd, int this_cpu,
2232                   unsigned long *imbalance, enum idle_type idle, int *sd_idle,
2233                   cpumask_t *cpus)
2234{
2235        struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
2236        unsigned long max_load, avg_load, total_load, this_load, total_pwr;
2237        unsigned long max_pull;
2238        unsigned long busiest_load_per_task, busiest_nr_running;
2239        unsigned long this_load_per_task, this_nr_running;
2240        int load_idx;
2241#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2242        int power_savings_balance = 1;
2243        unsigned long leader_nr_running = 0, min_load_per_task = 0;
2244        unsigned long min_nr_running = ULONG_MAX;
2245        struct sched_group *group_min = NULL, *group_leader = NULL;
2246#endif
2247
2248        max_load = this_load = total_load = total_pwr = 0;
2249        busiest_load_per_task = busiest_nr_running = 0;
2250        this_load_per_task = this_nr_running = 0;
2251        if (idle == NOT_IDLE)
2252                load_idx = sd->busy_idx;
2253        else if (idle == NEWLY_IDLE)
2254                load_idx = sd->newidle_idx;
2255        else
2256                load_idx = sd->idle_idx;
2257
2258        do {
2259                unsigned long load, group_capacity;
2260                int local_group;
2261                int i;
2262                unsigned long sum_nr_running, sum_weighted_load;
2263
2264                local_group = cpu_isset(this_cpu, group->cpumask);
2265
2266                /* Tally up the load of all CPUs in the group */
2267                sum_weighted_load = sum_nr_running = avg_load = 0;
2268
2269                for_each_cpu_mask(i, group->cpumask) {
2270                        struct rq *rq;
2271
2272                        if (!cpu_isset(i, *cpus))
2273                                continue;
2274
2275                        rq = cpu_rq(i);
2276
2277                        if (*sd_idle && !idle_cpu(i))
2278                                *sd_idle = 0;
2279
2280                        /* Bias balancing toward cpus of our domain */
2281                        if (local_group)
2282                                load = target_load(i, load_idx);
2283                        else
2284                                load = source_load(i, load_idx);
2285
2286                        avg_load += load;
2287                        sum_nr_running += rq->nr_running;
2288                        sum_weighted_load += rq->raw_weighted_load;
2289                }
2290
2291                total_load += avg_load;
2292                total_pwr += group->cpu_power;
2293
2294                /* Adjust by relative CPU power of the group */
2295                avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
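                /*
                 * For example, assuming SCHED_LOAD_SCALE is 128: a group
                 * whose cpu_power is 256 (roughly two full CPUs) has its
                 * raw load sum halved here, so groups of different sizes
                 * or speeds can be compared per unit of cpu_power.
                 */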
2296
2297                group_capacity = group->cpu_power / SCHED_LOAD_SCALE;
2298
2299                if (local_group) {
2300                        this_load = avg_load;
2301                        this = group;
2302                        this_nr_running = sum_nr_running;
2303                        this_load_per_task = sum_weighted_load;
2304                } else if (avg_load > max_load &&
2305                           sum_nr_running > group_capacity) {
2306                        max_load = avg_load;
2307                        busiest = group;
2308                        busiest_nr_running = sum_nr_running;
2309                        busiest_load_per_task = sum_weighted_load;
2310                }
2311
2312#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2313                /*
2314                 * Busy processors will not participate in power savings
2315                 * balance.
2316                 */
2317                if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2318                        goto group_next;
2319
2320                /*
2321                 * If the local group is idle or completely loaded
2322                 * If the local group is idle or completely loaded,
2323                 * there is no need to do power savings balance at this domain.
2324                if (local_group && (this_nr_running >= group_capacity ||
2325                                    !this_nr_running))
2326                        power_savings_balance = 0;
2327
2328                /*
2329                 * If a group is already running at full capacity or idle,
2330                 * don't include that group in power savings calculations
2331                 */
2332                if (!power_savings_balance || sum_nr_running >= group_capacity
2333                    || !sum_nr_running)
2334                        goto group_next;
2335
2336                /*
2337                 * Calculate the group which has the least non-idle load.
2338                 * This is the group from which we need to pick up load
2339                 * to save power.
2340                 */
2341                if ((sum_nr_running < min_nr_running) ||
2342                    (sum_nr_running == min_nr_running &&
2343                     first_cpu(group->cpumask) <
2344                     first_cpu(group_min->cpumask))) {
2345                        group_min = group;
2346                        min_nr_running = sum_nr_running;
2347                        min_load_per_task = sum_weighted_load /
2348                                                sum_nr_running;
2349                }
2350
2351                /*
2352                 * Calculate the group which is closest to its
2353                 * capacity but still has some room to pick up load
2354                 * from other groups and thereby save more power.
2355                 */
2356                if (sum_nr_running <= group_capacity - 1) {
2357                        if (sum_nr_running > leader_nr_running ||
2358                            (sum_nr_running == leader_nr_running &&
2359                             first_cpu(group->cpumask) >
2360                              first_cpu(group_leader->cpumask))) {
2361                                group_leader = group;
2362                                leader_nr_running = sum_nr_running;
2363                        }
2364                }
2365group_next:
2366#endif
2367                group = group->next;
2368        } while (group != sd->groups);
2369
2370        if (!busiest || this_load >= max_load || busiest_nr_running == 0)
2371                goto out_balanced;
2372
2373        avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
2374
2375        if (this_load >= avg_load ||
2376                        100*max_load <= sd->imbalance_pct*this_load)
2377                goto out_balanced;
2378
2379        busiest_load_per_task /= busiest_nr_running;
2380        /*
2381         * We're trying to get all the cpus to the average_load, so we don't
2382         * want to push ourselves above the average load, nor do we wish to
2383         * reduce the max loaded cpu below the average load, as either of these
2384         * actions would just result in more rebalancing later, and ping-pong
2385         * tasks around. Thus we look for the minimum possible imbalance.
2386         * Negative imbalances (*we* are more loaded than anyone else) will
2387         * be counted as no imbalance for these purposes -- we can't fix that
2388         * by pulling tasks to us.  Be careful of negative numbers as they'll
2389         * appear as very large values with unsigned longs.
2390         */
2391        if (max_load <= busiest_load_per_task)
2392                goto out_balanced;
2393
2394        /*
2395         * In the presence of smp nice balancing, certain scenarios can have
2396         * max load less than avg load (as we skip groups at or below
2397         * their cpu_power while calculating max_load).
2398         */
2399        if (max_load < avg_load) {
2400                *imbalance = 0;
2401                goto small_imbalance;
2402        }
2403
2404        /* Don't want to pull so many tasks that a group would go idle */
2405        max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
2406
2407        /* How much load to actually move to equalise the imbalance */
2408        *imbalance = min(max_pull * busiest->cpu_power,
2409                                (avg_load - this_load) * this->cpu_power)
2410                        / SCHED_LOAD_SCALE;
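        /*
         * Sketch of the arithmetic with made-up numbers, assuming
         * SCHED_LOAD_SCALE is 128: if max_pull is 64, busiest->cpu_power
         * 128, (avg_load - this_load) 32 and this->cpu_power 128, then
         * *imbalance = min(64 * 128, 32 * 128) / 128 = 32 - we move only
         * as much load as the less constrained side can absorb.
         */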
2411
2412        /*
2413         * If *imbalance is less than the average load per runnable task
2414         * there is no guarantee that any tasks will be moved, so we'll have
2415         * a think about bumping its value to force at least one task to be
2416         * moved.
2417         */
2418        if (*imbalance < busiest_load_per_task) {
2419                unsigned long tmp, pwr_now, pwr_move;
2420                unsigned int imbn;
2421
2422small_imbalance:
2423                pwr_move = pwr_now = 0;
2424                imbn = 2;
2425                if (this_nr_running) {
2426                        this_load_per_task /= this_nr_running;
2427                        if (busiest_load_per_task > this_load_per_task)
2428                                imbn = 1;
2429                } else
2430                        this_load_per_task = SCHED_LOAD_SCALE;
2431
2432                if (max_load - this_load >= busiest_load_per_task * imbn) {
2433                        *imbalance = busiest_load_per_task;
2434                        return busiest;
2435                }
2436
2437                /*
2438                 * OK, we don't have enough imbalance to justify moving tasks,
2439                 * however we may be able to increase total CPU power used by
2440                 * moving them.
2441                 */
2442
2443                pwr_now += busiest->cpu_power *
2444                        min(busiest_load_per_task, max_load);
2445                pwr_now += this->cpu_power *
2446                        min(this_load_per_task, this_load);
2447                pwr_now /= SCHED_LOAD_SCALE;
2448
2449                /* Amount of load we'd subtract */
2450                tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
2451                if (max_load > tmp)
2452                        pwr_move += busiest->cpu_power *
2453                                min(busiest_load_per_task, max_load - tmp);
2454
2455                /* Amount of load we'd add */
2456                if (max_load*busiest->cpu_power <
2457                                busiest_load_per_task*SCHED_LOAD_SCALE)
2458                        tmp = max_load*busiest->cpu_power/this->cpu_power;
2459                else
2460                        tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
2461                pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
2462                pwr_move /= SCHED_LOAD_SCALE;
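                /*
                 * To recap: pwr_now and pwr_move estimate the useful work
                 * done before and after shifting one busiest_load_per_task
                 * unit of load, with each side's contribution capped by
                 * the load actually present there. The move is worthwhile
                 * only if the combined estimate rises, which is exactly
                 * what the test below checks.
                 */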
2463
2464                /* Move if we gain throughput */
2465                if (pwr_move <= pwr_now)
2466                        goto out_balanced;
2467
2468                *imbalance = busiest_load_per_task;
2469        }
2470
2471        return busiest;
2472
2473out_balanced:
2474#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2475        if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2476                goto ret;
2477
2478        if (this == group_leader && group_leader != group_min) {
2479                *imbalance = min_load_per_task;
2480                return group_min;
2481        }
2482ret:
2483#endif
2484        *imbalance = 0;
2485        return NULL;
2486}
2487
2488/*
2489 * find_busiest_queue - find the busiest runqueue among the cpus in group.
2490 */
2491static struct rq *
2492find_busiest_queue(struct sched_group *group, enum idle_type idle,
2493                   unsigned long imbalance, cpumask_t *cpus)
2494{
2495        struct rq *busiest = NULL, *rq;
2496        unsigned long max_load = 0;
2497        int i;
2498
2499        for_each_cpu_mask(i, group->cpumask) {
2500
2501                if (!cpu_isset(i, *cpus))
2502                        continue;
2503
2504                rq = cpu_rq(i);
2505
2506                if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
2507                        continue;
2508
2509                if (rq->raw_weighted_load > max_load) {
2510                        max_load = rq->raw_weighted_load;
2511                        busiest = rq;
2512                }
2513        }
2514
2515        return busiest;
2516}
2517
2518/*
2519 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
2520 * it works so long as it is large enough.
2521 */
2522#define MAX_PINNED_INTERVAL     512
2523
2524static inline unsigned long minus_1_or_zero(unsigned long n)
2525{
2526        return n > 0 ? n - 1 : 0;
2527}
2528
2529/*
2530 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2531 * tasks if there is an imbalance.
2532 *
2533 * Called with this_rq unlocked.
2534 */
2535static int load_balance(int this_cpu, struct rq *this_rq,
2536                        struct sched_domain *sd, enum idle_type idle)
2537{
2538        int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
2539        struct sched_group *group;
2540        unsigned long imbalance;
2541        struct rq *busiest;
2542        cpumask_t cpus = CPU_MASK_ALL;
2543
2544        /*
2545         * When the power savings policy is enabled for the parent domain, an
2546         * idle sibling can pick up load irrespective of busy siblings. In this
2547         * case, let the state of the idle sibling percolate up as IDLE,
2548         * instead of portraying it as NOT_IDLE.
2549         */
2550        if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
2551            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2552                sd_idle = 1;
2553
2554        schedstat_inc(sd, lb_cnt[idle]);
2555
2556redo:
2557        group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
2558                                                        &cpus);
2559        if (!group) {
2560                schedstat_inc(sd, lb_nobusyg[idle]);
2561                goto out_balanced;
2562        }
2563
2564        busiest = find_busiest_queue(group, idle, imbalance, &cpus);
2565        if (!busiest) {
2566                schedstat_inc(sd, lb_nobusyq[idle]);
2567                goto out_balanced;
2568        }
2569
2570        BUG_ON(busiest == this_rq);
2571
2572        schedstat_add(sd, lb_imbalance[idle], imbalance);
2573
2574        nr_moved = 0;
2575        if (busiest->nr_running > 1) {
2576                /*
2577                 * Attempt to move tasks. If find_busiest_group has found
2578                 * an imbalance but busiest->nr_running <= 1, the group is
2579                 * still unbalanced. nr_moved simply stays zero, so it is
2580                 * correctly treated as an imbalance.
2581                 */
2582                double_rq_lock(this_rq, busiest);
2583                nr_moved = move_tasks(this_rq, this_cpu, busiest,
2584                                      minus_1_or_zero(busiest->nr_running),
2585                                      imbalance, sd, idle, &all_pinned);
2586                double_rq_unlock(this_rq, busiest);
2587
2588                /* All tasks on this runqueue were pinned by CPU affinity */
2589                if (unlikely(all_pinned)) {
2590                        cpu_clear(cpu_of(busiest), cpus);
2591                        if (!cpus_empty(cpus))
2592                                goto redo;
2593                        goto out_balanced;
2594                }
2595        }
2596
2597        if (!nr_moved) {
2598                schedstat_inc(sd, lb_failed[idle]);
2599                sd->nr_balance_failed++;
2600
2601                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
2602
2603                        spin_lock(&busiest->lock);
2604
2605                        /* Don't kick the migration_thread if the curr
2606                         * task on the busiest CPU can't be moved to this_cpu.
2607                         */
2608                        if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
2609                                spin_unlock(&busiest->lock);
2610                                all_pinned = 1;
2611                                goto out_one_pinned;
2612                        }
2613
2614                        if (!busiest->active_balance) {
2615                                busiest->active_balance = 1;
2616                                busiest->push_cpu = this_cpu;
2617                                active_balance = 1;
2618                        }
2619                        spin_unlock(&busiest->lock);
2620                        if (active_balance)
2621                                wake_up_process(busiest->migration_thread);
2622
2623                        /*
2624                         * We've kicked active balancing, reset the failure
2625                         * counter.
2626                         */
2627                        sd->nr_balance_failed = sd->cache_nice_tries+1;
2628                }
2629        } else
2630                sd->nr_balance_failed = 0;
2631
2632        if (likely(!active_balance)) {
2633                /* We were unbalanced, so reset the balancing interval */
2634                sd->balance_interval = sd->min_interval;
2635        } else {
2636                /*
2637                 * If we've begun active balancing, start to back off. This
2638                 * case may not be covered by the all_pinned logic if there
2639                 * is only 1 task on the busy runqueue (because we don't call
2640                 * move_tasks).
2641                 */
2642                if (sd->balance_interval < sd->max_interval)
2643                        sd->balance_interval *= 2;
2644        }
2645
2646        if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2647            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2648                return -1;
2649        return nr_moved;
2650
2651out_balanced:
2652        schedstat_inc(sd, lb_balanced[idle]);
2653
2654        sd->nr_balance_failed = 0;
2655
2656out_one_pinned:
2657        /* tune up the balancing interval */
2658        if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
2659                        (sd->balance_interval < sd->max_interval))
2660                sd->balance_interval *= 2;
2661
2662        if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2663            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2664                return -1;
2665        return 0;
2666}
2667
2668/*
2669 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2670 * tasks if there is an imbalance.
2671 *
2672 * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
2673 * this_rq is locked.
2674 */
2675static int
2676load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
2677{
2678        struct sched_group *group;
2679        struct rq *busiest = NULL;
2680        unsigned long imbalance;
2681        int nr_moved = 0;
2682        int sd_idle = 0;
2683        cpumask_t cpus = CPU_MASK_ALL;
2684
2685        /*
2686         * When the power savings policy is enabled for the parent domain, an
2687         * idle sibling can pick up load irrespective of busy siblings. In this
2688         * case, let the state of the idle sibling percolate up as IDLE,
2689         * instead of portraying it as NOT_IDLE.
2690         */
2691        if (sd->flags & SD_SHARE_CPUPOWER &&
2692            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2693                sd_idle = 1;
2694
2695        schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
2696redo:
2697        group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
2698                                &sd_idle, &cpus);
2699        if (!group) {
2700                schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
2701                goto out_balanced;
2702        }
2703
2704        busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
2705                                &cpus);
2706        if (!busiest) {
2707                schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
2708                goto out_balanced;
2709        }
2710
2711        BUG_ON(busiest == this_rq);
2712
2713        schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
2714
2715        nr_moved = 0;
2716        if (busiest->nr_running > 1) {
2717                /* Attempt to move tasks */
2718                double_lock_balance(this_rq, busiest);
2719                nr_moved = move_tasks(this_rq, this_cpu, busiest,
2720                                        minus_1_or_zero(busiest->nr_running),
2721                                        imbalance, sd, NEWLY_IDLE, NULL);
2722                spin_unlock(&busiest->lock);
2723
2724                if (!nr_moved) {
2725                        cpu_clear(cpu_of(busiest), cpus);
2726                        if (!cpus_empty(cpus))
2727                                goto redo;
2728                }
2729        }
2730
2731        if (!nr_moved) {
2732                schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
2733                if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2734                    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2735                        return -1;
2736        } else
2737                sd->nr_balance_failed = 0;
2738
2739        return nr_moved;
2740
2741out_balanced:
2742        schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
2743        if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
2744            !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
2745                return -1;
2746        sd->nr_balance_failed = 0;
2747
2748        return 0;
2749}
2750
2751/*
2752 * idle_balance is called by schedule() if this_cpu is about to become
2753 * idle. Attempts to pull tasks from other CPUs.
2754 */
2755static void idle_balance(int this_cpu, struct rq *this_rq)
2756{
2757        struct sched_domain *sd;
2758
2759        for_each_domain(this_cpu, sd) {
2760                if (sd->flags & SD_BALANCE_NEWIDLE) {
2761                        /* If we've pulled tasks over stop searching: */
2762                        if (load_balance_newidle(this_cpu, this_rq, sd))
2763                                break;
2764                }
2765        }
2766}
2767
2768/*
2769 * active_load_balance is run by migration threads. It pushes running tasks
2770 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
2771 * running on each physical CPU where possible, and avoids physical /
2772 * logical imbalances.
2773 *
2774 * Called with busiest_rq locked.
2775 */
2776static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
2777{
2778        int target_cpu = busiest_rq->push_cpu;
2779        struct sched_domain *sd;
2780        struct rq *target_rq;
2781
2782        /* Is there any task to move? */
2783        if (busiest_rq->nr_running <= 1)
2784                return;
2785
2786        target_rq = cpu_rq(target_cpu);
2787
2788        /*
2789         * This condition is "impossible"; if it occurs
2790         * we need to fix it.  Originally reported by
2791         * Bjorn Helgaas on a 128-cpu setup.
2792         */
2793        BUG_ON(busiest_rq == target_rq);
2794
2795        /* move a task from busiest_rq to target_rq */
2796        double_lock_balance(busiest_rq, target_rq);
2797
2798        /* Search for an sd spanning us and the target CPU. */
2799        for_each_domain(target_cpu, sd) {
2800                if ((sd->flags & SD_LOAD_BALANCE) &&
2801                    cpu_isset(busiest_cpu, sd->span))
2802                                break;
2803        }
2804
2805        if (likely(sd)) {
2806                schedstat_inc(sd, alb_cnt);
2807
2808                if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
2809                               RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
2810                               NULL))
2811                        schedstat_inc(sd, alb_pushed);
2812                else
2813                        schedstat_inc(sd, alb_failed);
2814        }
2815        spin_unlock(&target_rq->lock);
2816}
2817
2818/*
2819 * rebalance_tick will get called every timer tick, on every CPU.
2820 *
2821 * It checks each scheduling domain to see if it is due to be balanced,
2822 * and initiates a balancing operation if so.
2823 *
2824 * Balancing parameters are set up in arch_init_sched_domains.
2825 */
2826
2827/* Don't have all balancing operations going off at once: */
2828static inline unsigned long cpu_offset(int cpu)
2829{
2830        return jiffies + cpu * HZ / NR_CPUS;
2831}
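/*
 * For example, with HZ=1000 and NR_CPUS=4, CPU 0 balances at jiffies + 0,
 * CPU 1 at jiffies + 250, CPU 2 at jiffies + 500 and CPU 3 at
 * jiffies + 750, spreading the balancing passes over one second instead
 * of firing them all on the same tick.
 */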
2832
2833static void
2834rebalance_tick(int this_cpu, struct rq *this_rq, enum idle_type idle)
2835{
2836        unsigned long this_load, interval, j = cpu_offset(this_cpu);
2837        struct sched_domain *sd;
2838        int i, scale;
2839
2840        this_load = this_rq->raw_weighted_load;
2841
2842        /* Update our load: */
2843        for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
2844                unsigned long old_load, new_load;
2845
2846                old_load = this_rq->cpu_load[i];
2847                new_load = this_load;
2848                /*
2849                 * Round up the averaging division if load is increasing. This
2850                 * prevents us from getting stuck on 9 if the load is 10, for
2851                 * example.
2852                 */
2853                if (new_load > old_load)
2854                        new_load += scale-1;
2855                this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
2856        }
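        /*
         * With scale = 1, 2, 4 for i = 0, 1, 2 this yields cpu_load[0] =
         * the instantaneous load, cpu_load[1] = (old + new) / 2 and
         * cpu_load[2] = (3*old + new) / 4: progressively slower-moving
         * averages of the same raw_weighted_load.
         */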
2857
2858        for_each_domain(this_cpu, sd) {
2859                if (!(sd->flags & SD_LOAD_BALANCE))
2860                        continue;
2861
2862                interval = sd->balance_interval;
2863                if (idle != SCHED_IDLE)
2864                        interval *= sd->busy_factor;
2865
2866                /* scale ms to jiffies */
2867                interval = msecs_to_jiffies(interval);
2868                if (unlikely(!interval))
2869                        interval = 1;
2870
2871                if (j - sd->last_balance >= interval) {
2872                        if (load_balance(this_cpu, this_rq, sd, idle)) {
2873                                /*
2874                                 * We've pulled tasks over so either we're no
2875                                 * longer idle, or one of our SMT siblings is
2876                                 * not idle.
2877                                 */
2878                                idle = NOT_IDLE;
2879                        }
2880                        sd->last_balance += interval;
2881                }
2882        }
2883}
2884#else
2885/*
2886 * on UP we do not need to balance between CPUs:
2887 */
2888static inline void rebalance_tick(int cpu, struct rq *rq, enum idle_type idle)
2889{
2890}
2891static inline void idle_balance(int cpu, struct rq *rq)
2892{
2893}
2894#endif
2895
2896static inline int wake_priority_sleeper(struct rq *rq)
2897{
2898        int ret = 0;
2899
2900#ifdef CONFIG_SCHED_SMT
2901        spin_lock(&rq->lock);
2902        /*
2903         * If an SMT sibling task has been put to sleep for priority
2904         * reasons, reschedule the idle task to see if it can now run.
2905         */
2906        if (rq->nr_running) {
2907                resched_task(rq->idle);
2908                ret = 1;
2909        }
2910        spin_unlock(&rq->lock);
2911#endif
2912        return ret;
2913}
2914
2915DEFINE_PER_CPU(struct kernel_stat, kstat);
2916
2917EXPORT_PER_CPU_SYMBOL(kstat);
2918
2919/*
2920 * This is called on clock ticks and on context switches.
2921 * Bank in p->sched_time the ns elapsed since the last tick or switch.
2922 */
2923static inline void
2924update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
2925{
2926        p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
2927}
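/*
 * The max() above avoids double accounting: time before the last tick
 * was already banked by the tick path, and time before p's own timestamp
 * belongs to whatever ran before it, so only the span since the later of
 * the two is credited to p.
 */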
2928
2929/*
2930 * Return current->sched_time plus any more ns on the sched_clock
2931 * that have not yet been banked.
2932 */
2933unsigned long long current_sched_time(const struct task_struct *p)
2934{
2935        unsigned long long ns;
2936        unsigned long flags;
2937
2938        local_irq_save(flags);
2939        ns = max(p->timestamp, task_rq(p)->timestamp_last_tick);
2940        ns = p->sched_time + sched_clock() - ns;
2941        local_irq_restore(flags);
2942
2943        return ns;
2944}
2945
2946/*
2947 * We place interactive tasks back into the active array, if possible.
2948 *
2949 * To guarantee that this does not starve expired tasks we ignore the
2950 * interactivity of a task if the first expired task had to wait more
2951 * than a 'reasonable' amount of time. This deadline timeout is
2952 * load-dependent, as the frequency of array switches decreases with
2953 * an increasing number of running tasks. We also ignore the interactivity
2954 * if a better static_prio task has expired:
2955 */
2956static inline int expired_starving(struct rq *rq)
2957{
2958        if (rq->curr->static_prio > rq->best_expired_prio)
2959                return 1;
2960        if (!STARVATION_LIMIT || !rq->expired_timestamp)
2961                return 0;
2962        if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
2963                return 1;
2964        return 0;
2965}
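/*
 * Illustrative numbers, assuming STARVATION_LIMIT corresponds to one
 * second's worth of jiffies: with 5 runnable tasks, the expired array is
 * considered starving once its oldest task has waited more than 5
 * seconds, after which interactive tasks are no longer reinserted into
 * the active array.
 */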
2966
2967/*
2968 * Account user cpu time to a process.
2969 * @p: the process that the cpu time gets accounted to
2971 * @cputime: the cpu time spent in user space since the last update
2972 */
2973void account_user_time(struct task_struct *p, cputime_t cputime)
2974{
2975        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2976        cputime64_t tmp;
2977
2978        p->utime = cputime_add(p->utime, cputime);
2979
2980        /* Add user time to cpustat. */
2981        tmp = cputime_to_cputime64(cputime);
2982        if (TASK_NICE(p) > 0)
2983                cpustat->nice = cputime64_add(cpustat->nice, tmp);
2984        else
2985                cpustat->user = cputime64_add(cpustat->user, tmp);
2986}
2987
2988/*
2989 * Account system cpu time to a process.
2990 * @p: the process that the cpu time gets accounted to
2991 * @hardirq_offset: the offset to subtract from hardirq_count()
2992 * @cputime: the cpu time spent in kernel space since the last update
2993 */
2994void account_system_time(struct task_struct *p, int hardirq_offset,
2995                         cputime_t cputime)
2996{
2997        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2998        struct rq *rq = this_rq();
2999        cputime64_t tmp;
3000
3001        p->stime = cputime_add(p->stime, cputime);
3002
3003        /* Add system time to cpustat. */
3004        tmp = cputime_to_cputime64(cputime);
3005        if (hardirq_count() - hardirq_offset)
3006                cpustat->irq = cputime64_add(cpustat->irq, tmp);
3007        else if (softirq_count())
3008                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3009        else if (p != rq->idle)
3010                cpustat->system = cputime64_add(cpustat->system, tmp);
3011        else if (atomic_read(&rq->nr_iowait) > 0)
3012                cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3013        else
3014                cpustat->idle = cputime64_add(cpustat->idle, tmp);
3015        /* Account for system time used */
3016        acct_update_integrals(p);
3017}
3018
3019/*
3020 * Account for involuntary wait time.
3021 * @p: the process from which the cpu time has been stolen
3022 * @steal: the cpu time spent in involuntary wait
3023 */
3024void account_steal_time(struct task_struct *p, cputime_t steal)
3025{
3026        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3027        cputime64_t tmp = cputime_to_cputime64(steal);
3028        struct rq *rq = this_rq();
3029
3030        if (p == rq->idle) {
3031                p->stime = cputime_add(p->stime, steal);
3032                if (atomic_read(&rq->nr_iowait) > 0)
3033                        cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
3034                else
3035                        cpustat->idle = cputime64_add(cpustat->idle, tmp);
3036        } else
3037                cpustat->steal = cputime64_add(cpustat->steal, tmp);
3038}
3039
3040/*
3041 * This function gets called by the timer code, with HZ frequency.
3042 * We call it with interrupts disabled.
3043 *
3044 * It also gets called by the fork code, when changing the parent's
3045 * timeslices.
3046 */
3047void scheduler_tick(void)
3048{
3049        unsigned long long now = sched_clock();
3050        struct task_struct *p = current;
3051        int cpu = smp_processor_id();
3052        struct rq *rq = cpu_rq(cpu);
3053
3054        update_cpu_clock(p, rq, now);
3055
3056        rq->timestamp_last_tick = now;
3057
3058        if (p == rq->idle) {
3059                if (wake_priority_sleeper(rq))
3060                        goto out;
3061                rebalance_tick(cpu, rq, SCHED_IDLE);
3062                return;
3063        }
3064
3065        /* Task might have expired already, but not scheduled off yet */
3066        if (p->array != rq->active) {
3067                set_tsk_need_resched(p);
3068                goto out;
3069        }
3070        spin_lock(&rq->lock);
3071        /*
3072         * The task was running during this tick - update the
3073         * time slice counter. Note: we do not update a thread's
3074         * priority until it either goes to sleep or uses up its
3075         * timeslice. This makes it possible for interactive tasks
3076         * to use up their timeslices at their highest priority levels.
3077         */
3078        if (rt_task(p)) {
3079                /*
3080                 * RR tasks need a special form of timeslice management.
3081                 * FIFO tasks have no timeslices.
3082                 */
3083                if ((p->policy == SCHED_RR) && !--p->time_slice) {
3084                        p->time_slice = task_timeslice(p);
3085                        p->first_time_slice = 0;
3086                        set_tsk_need_resched(p);
3087
3088                        /* put it at the end of the queue: */
3089                        requeue_task(p, rq->active);
3090                }
3091                goto out_unlock;
3092        }
3093        if (!--p->time_slice) {
3094                dequeue_task(p, rq->active);
3095                set_tsk_need_resched(p);
3096                p->prio = effective_prio(p);
3097                p->time_slice = task_timeslice(p);
3098                p->first_time_slice = 0;
3099
3100                if (!rq->expired_timestamp)
3101                        rq->expired_timestamp = jiffies;
3102                if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
3103                        enqueue_task(p, rq->expired);
3104                        if (p->static_prio < rq->best_expired_prio)
3105                                rq->best_expired_prio = p->static_prio;
3106                } else
3107                        enqueue_task(p, rq->active);
3108        } else {
3109                /*
3110                 * Prevent too long a timeslice from letting a task monopolize
3111                 * the CPU. We do this by splitting up the timeslice into
3112                 * smaller pieces.
3113                 *
3114                 * Note: this does not mean the task's timeslices expire or
3115                 * get lost in any way; they just might be preempted by
3116                 * another task of equal priority. (one with higher
3117                 * priority would have preempted this task already.) We
3118                 * requeue this task to the end of the list on this priority
3119                 * level, which is in essence a round-robin of tasks with
3120                 * equal priority.
3121                 *
3122                 * This only applies to tasks in the interactive
3123                 * delta range with at least TIMESLICE_GRANULARITY to requeue.
3124                 */
3125                if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
3126                        p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
3127                        (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
3128                        (p->array == rq->active)) {
3129
3130                        requeue_task(p, rq->active);
3131                        set_tsk_need_resched(p);
3132                }
3133        }
3134out_unlock:
3135        spin_unlock(&rq->lock);
3136out:
3137        rebalance_tick(cpu, rq, NOT_IDLE);
3138}
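
/*
 * Worked example of the requeue logic above (the figures are
 * illustrative only): if task_timeslice(p) is 100 ticks and
 * TIMESLICE_GRANULARITY(p) evaluates to 10 ticks, the modulo test
 * fires after every 10 ticks consumed, and the
 * p->time_slice >= TIMESLICE_GRANULARITY(p) check stops the rotation
 * once fewer than 10 ticks of the slice remain - so the task is moved
 * to the tail of its priority queue every 10 ticks until its slice is
 * nearly spent.
 */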
3139
3140#ifdef CONFIG_SCHED_SMT
3141static inline void wakeup_busy_runqueue(struct rq *rq)
3142{
3143        /* If an SMT runqueue is sleeping due to priority reasons, wake it up */
3144        if (rq->curr == rq->idle && rq->nr_running)
3145                resched_task(rq->idle);
3146}
3147
3148/*
3149 * Called with interrupts disabled and this_rq's runqueue lock held.
3150 */
3151static void wake_sleeping_dependent(int this_cpu)
3152{
3153        struct sched_domain *tmp, *sd = NULL;
3154        int i;
3155
3156        for_each_domain(this_cpu, tmp) {
3157                if (tmp->flags & SD_SHARE_CPUPOWER) {
3158                        sd = tmp;
3159                        break;
3160                }
3161        }
3162
3163        if (!sd)
3164                return;
3165
3166        for_each_cpu_mask(i, sd->span) {
3167                struct rq *smt_rq = cpu_rq(i);
3168
3169                if (i == this_cpu)
3170                        continue;
3171                if (unlikely(!spin_trylock(&smt_rq->lock)))
3172                        continue;
3173
3174                wakeup_busy_runqueue(smt_rq);
3175                spin_unlock(&smt_rq->lock);
3176        }
3177}
3178
3179/*
3180 * Number of 'lost' timeslices this task won't be able to fully
3181 * utilize, if another task runs on a sibling. This models the
3182 * slowdown effect of other tasks running on siblings:
3183 */
3184static inline unsigned long
3185smt_slice(struct task_struct *p, struct sched_domain *sd)
3186{
3187        return p->time_slice * (100 - sd->per_cpu_gain) / 100;
3188}
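
/*
 * Illustrative numbers for smt_slice(), assuming the common SMT-domain
 * setting of sd->per_cpu_gain = 25: a sibling task holding a 100-tick
 * slice yields 100 * (100 - 25) / 100 = 75 ticks, i.e. three quarters
 * of its slice is modelled as lost to sibling contention.
 * dependent_sleeper() below compares this value against the wakee's
 * own timeslice when deciding whether to delay it.
 */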
3189
3190/*
3191 * To minimise lock contention and avoid having to drop this_rq's runqueue
3192 * lock, we only trylock the sibling runqueues and bypass those runqueues if
3193 * we fail to acquire their lock. As we only trylock, the normal locking
3194 * order does not need to be obeyed.
3195 */
3196static int
3197dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
3198{
3199        struct sched_domain *tmp, *sd = NULL;
3200        int ret = 0, i;
3201
3202        /* kernel/rt threads do not participate in dependent sleeping */
3203        if (!p->mm || rt_task(p))
3204                return 0;
3205
3206        for_each_domain(this_cpu, tmp) {
3207                if (tmp->flags & SD_SHARE_CPUPOWER) {
3208                        sd = tmp;
3209                        break;
3210                }
3211        }
3212
3213        if (!sd)
3214                return 0;
3215
3216        for_each_cpu_mask(i, sd->span) {
3217                struct task_struct *smt_curr;
3218                struct rq *smt_rq;
3219
3220                if (i == this_cpu)
3221                        continue;
3222
3223                smt_rq = cpu_rq(i);
3224                if (unlikely(!spin_trylock(&smt_rq->lock)))
3225                        continue;
3226
3227                smt_curr = smt_rq->curr;
3228
3229                if (!smt_curr->mm)
3230                        goto unlock;
3231
3232                /*
3233                 * If a user task with lower static priority than the
3234                 * running task on the SMT sibling is trying to schedule,
3235                 * delay it till there is proportionately less timeslice
3236                 * left of the sibling task to prevent a lower priority
3237                 * task from using an unfair proportion of the
3238                 * physical cpu's resources. -ck
3239                 */
3240                if (rt_task(smt_curr)) {
3241                        /*
3242                         * With real time tasks we run non-rt tasks only
3243                         * per_cpu_gain% of the time.
3244                         */
3245                        if ((jiffies % DEF_TIMESLICE) >
3246                                (sd->per_cpu_gain * DEF_TIMESLICE / 100))
3247                                        ret = 1;
3248                } else {
3249                        if (smt_curr->static_prio < p->static_prio &&
3250                                !TASK_PREEMPTS_CURR(p, smt_rq) &&
3251                                smt_slice(smt_curr, sd) > task_timeslice(p))
3252                                        ret = 1;
3253                }
3254unlock:
3255                spin_unlock(&smt_rq->lock);
3256        }
3257        return ret;
3258}
3259#else
3260static inline void wake_sleeping_dependent(int this_cpu)
3261{
3262}
3263static inline int
3264dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
3265{
3266        return 0;
3267}
3268#endif
3269
3270#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
3271
3272void fastcall add_preempt_count(int val)
3273{
3274        /*
3275         * Underflow?
3276         */
3277        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3278                return;
3279        preempt_count() += val;
3280        /*
3281         * Spinlock count overflowing soon?
3282         */
3283        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
3284}
3285EXPORT_SYMBOL(add_preempt_count);
3286
3287void fastcall sub_preempt_count(int val)
3288{
3289        /*
3290         * Underflow?
3291         */
3292        if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3293                return;
3294        /*
3295         * Is the spinlock portion underflowing?
3296         */
3297        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3298                        !(preempt_count() & PREEMPT_MASK)))
3299                return;
3300
3301        preempt_count() -= val;
3302}
3303EXPORT_SYMBOL(sub_preempt_count);
3304
3305#endif
3306
3307static inline int interactive_sleep(enum sleep_type sleep_type)
3308{
3309        return (sleep_type == SLEEP_INTERACTIVE ||
3310                sleep_type == SLEEP_INTERRUPTED);
3311}
3312
3313/*
3314 * schedule() is the main scheduler function.
3315 */
3316asmlinkage void __sched schedule(void)
3317{
3318        struct task_struct *prev, *next;
3319        struct prio_array *array;
3320        struct list_head *queue;
3321        unsigned long long now;
3322        unsigned long run_time;
3323        int cpu, idx, new_prio;
3324        long *switch_count;
3325        struct rq *rq;
3326
3327        /*
3328         * Test if we are atomic.  Since do_exit() needs to call into
3329         * schedule() atomically, we ignore that path for now.
3330         * Otherwise, whine if we are scheduling when we should not be.
3331         */
3332        if (unlikely(in_atomic() && !current->exit_state)) {
3333                printk(KERN_ERR "BUG: scheduling while atomic: "
3334                        "%s/0x%08x/%d\n",
3335                        current->comm, preempt_count(), current->pid);
3336                dump_stack();
3337        }
3338        profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3339
3340need_resched:
3341        preempt_disable();
3342        prev = current;
3343        release_kernel_lock(prev);
3344need_resched_nonpreemptible:
3345        rq = this_rq();
3346
3347        /*
3348         * The idle thread is not allowed to schedule!
3349         * Remove this check after it has been exercised a bit.
3350         */
3351        if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
3352                printk(KERN_ERR "bad: scheduling from the idle thread!\n");
3353                dump_stack();
3354        }
3355
3356        schedstat_inc(rq, sched_cnt);
3357        now = sched_clock();
3358        if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
3359                run_time = now - prev->timestamp;
3360                if (unlikely((long long)(now - prev->timestamp) < 0))
3361                        run_time = 0;
3362        } else
3363                run_time = NS_MAX_SLEEP_AVG;
3364
3365        /*
3366         * Tasks are charged proportionately less run_time at high sleep_avg
3367         * to delay the loss of their interactive status
3368         */
3369        run_time /= (CURRENT_BONUS(prev) ? : 1);
3370
3371        spin_lock_irq(&rq->lock);
3372
3373        switch_count = &prev->nivcsw;
3374        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3375                switch_count = &prev->nvcsw;
3376                if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
3377                                unlikely(signal_pending(prev))))
3378                        prev->state = TASK_RUNNING;
3379                else {
3380                        if (prev->state == TASK_UNINTERRUPTIBLE)
3381                                rq->nr_uninterruptible++;
3382                        deactivate_task(prev, rq);
3383                }
3384        }
3385
3386        cpu = smp_processor_id();
3387        if (unlikely(!rq->nr_running)) {
3388                idle_balance(cpu, rq);
3389                if (!rq->nr_running) {
3390                        next = rq->idle;
3391                        rq->expired_timestamp = 0;
3392                        wake_sleeping_dependent(cpu);
3393                        goto switch_tasks;
3394                }
3395        }
3396
3397        array = rq->active;
3398        if (unlikely(!array->nr_active)) {
3399                /*
3400                 * Switch the active and expired arrays.
3401                 */
3402                schedstat_inc(rq, sched_switch);
3403                rq->active = rq->expired;
3404                rq->expired = array;
3405                array = rq->active;
3406                rq->expired_timestamp = 0;
3407                rq->best_expired_prio = MAX_PRIO;
3408        }
3409
3410        idx = sched_find_first_bit(array->bitmap);
3411        queue = array->queue + idx;
3412        next = list_entry(queue->next, struct task_struct, run_list);
3413
3414        if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
3415                unsigned long long delta = now - next->timestamp;
3416                if (unlikely((long long)(now - next->timestamp) < 0))
3417                        delta = 0;
3418
3419                if (next->sleep_type == SLEEP_INTERACTIVE)
3420                        delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
3421
3422                array = next->array;
3423                new_prio = recalc_task_prio(next, next->timestamp + delta);
3424
3425                if (unlikely(next->prio != new_prio)) {
3426                        dequeue_task(next, array);
3427                        next->prio = new_prio;
3428                        enqueue_task(next, array);
3429                }
3430        }
3431        next->sleep_type = SLEEP_NORMAL;
3432        if (dependent_sleeper(cpu, rq, next))
3433                next = rq->idle;
3434switch_tasks:
3435        if (next == rq->idle)
3436                schedstat_inc(rq, sched_goidle);
3437        prefetch(next);
3438        prefetch_stack(next);
3439        clear_tsk_need_resched(prev);
3440        rcu_qsctr_inc(task_cpu(prev));
3441
3442        update_cpu_clock(prev, rq, now);
3443
3444        prev->sleep_avg -= run_time;
3445        if ((long)prev->sleep_avg <= 0)
3446                prev->sleep_avg = 0;
3447        prev->timestamp = prev->last_ran = now;
3448
3449        sched_info_switch(prev, next);
3450        if (likely(prev != next)) {
3451                next->timestamp = now;
3452                rq->nr_switches++;
3453                rq->curr = next;
3454                ++*switch_count;
3455
3456                prepare_task_switch(rq, next);
3457                prev = context_switch(rq, prev, next);
3458                barrier();
3459                /*
3460                 * this_rq must be evaluated again because prev may have moved
3461                 * CPUs since it called schedule(), thus the 'rq' on its stack
3462                 * frame will be invalid.
3463                 */
3464                finish_task_switch(this_rq(), prev);
3465        } else
3466                spin_unlock_irq(&rq->lock);
3467
3468        prev = current;
3469        if (unlikely(reacquire_kernel_lock(prev) < 0))
3470                goto need_resched_nonpreemptible;
3471        preempt_enable_no_resched();
3472        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3473                goto need_resched;
3474}
3475EXPORT_SYMBOL(schedule);
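
/*
 * A minimal sketch of the canonical sleeping pattern built on
 * schedule() (my_waitqueue and my_condition are placeholders):
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue(&my_waitqueue, &wait);
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	while (!my_condition) {
 *		schedule();
 *		set_current_state(TASK_INTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_waitqueue, &wait);
 *
 * Setting ->state before testing the condition is what lets the
 * prev->state check in schedule() deactivate the task without losing
 * a wakeup that races with the test: a concurrent try_to_wake_up()
 * puts the task back to TASK_RUNNING under the runqueue lock.
 */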
3476
3477#ifdef CONFIG_PREEMPT
3478/*
3479 * this is the entry point to schedule() from in-kernel preemption
3480 * off of preempt_enable.  Kernel preemption off the return-from-interrupt
3481 * path goes through preempt_schedule_irq() below and calls schedule() directly.
3482 */
3483asmlinkage void __sched preempt_schedule(void)
3484{
3485        struct thread_info *ti = current_thread_info();
3486#ifdef CONFIG_PREEMPT_BKL
3487        struct task_struct *task = current;
3488        int saved_lock_depth;
3489#endif
3490        /*
3491         * If there is a non-zero preempt_count or interrupts are disabled,
3492         * we do not want to preempt the current task.  Just return.
3493         */
3494        if (likely(ti->preempt_count || irqs_disabled()))
3495                return;
3496
3497need_resched:
3498        add_preempt_count(PREEMPT_ACTIVE);
3499        /*
3500         * We keep the big kernel semaphore locked, but we
3501         * clear ->lock_depth so that schedule() doesn't
3502         * auto-release the semaphore:
3503         */
3504#ifdef CONFIG_PREEMPT_BKL
3505        saved_lock_depth = task->lock_depth;
3506        task->lock_depth = -1;
3507#endif
3508        schedule();
3509#ifdef CONFIG_PREEMPT_BKL
3510        task->lock_depth = saved_lock_depth;
3511#endif
3512        sub_preempt_count(PREEMPT_ACTIVE);
3513
3514        /* we could miss a preemption opportunity between schedule and now */
3515        barrier();
3516        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3517                goto need_resched;
3518}
3519EXPORT_SYMBOL(preempt_schedule);
3520
3521/*
3522 * this is the entry point to schedule() from kernel preemption
3523 * off of irq context.
3524 * Note that this is called and returns with irqs disabled. This
3525 * protects us against recursive calls from irq context.
3526 */
3527asmlinkage void __sched preempt_schedule_irq(void)
3528{
3529        struct thread_info *ti = current_thread_info();
3530#ifdef CONFIG_PREEMPT_BKL
3531        struct task_struct *task = current;
3532        int saved_lock_depth;
3533#endif
3534        /* Catch callers which need to be fixed */
3535        BUG_ON(ti->preempt_count || !irqs_disabled());
3536
3537need_resched:
3538        add_preempt_count(PREEMPT_ACTIVE);
3539        /*
3540         * We keep the big kernel semaphore locked, but we
3541         * clear ->lock_depth so that schedule() doesn't
3542         * auto-release the semaphore:
3543         */
3544#ifdef CONFIG_PREEMPT_BKL
3545        saved_lock_depth = task->lock_depth;
3546        task->lock_depth = -1;
3547#endif
3548        local_irq_enable();
3549        schedule();
3550        local_irq_disable();
3551#ifdef CONFIG_PREEMPT_BKL
3552        task->lock_depth = saved_lock_depth;
3553#endif
3554        sub_preempt_count(PREEMPT_ACTIVE);
3555
3556        /* we could miss a preemption opportunity between schedule and now */
3557        barrier();
3558        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
3559                goto need_resched;
3560}
3561
3562#endif /* CONFIG_PREEMPT */
3563
3564int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
3565                          void *key)
3566{
3567        return try_to_wake_up(curr->private, mode, sync);
3568}
3569EXPORT_SYMBOL(default_wake_function);
3570
3571/*
3572 * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
3573 * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
3574 * number) then we wake all the non-exclusive tasks plus up to nr_exclusive exclusive tasks.
3575 *
3576 * There are circumstances in which we can try to wake a task which has already
3577 * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
3578 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3579 */
3580static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3581                             int nr_exclusive, int sync, void *key)
3582{
3583        struct list_head *tmp, *next;
3584
3585        list_for_each_safe(tmp, next, &q->task_list) {
3586                wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
3587                unsigned flags = curr->flags;
3588
3589                if (curr->func(curr, mode, sync, key) &&
3590                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3591                        break;
3592        }
3593}
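
/*
 * Sketch of an exclusive waiter pairing with the nr_exclusive logic
 * above (my_queue and my_condition are placeholders):
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait_exclusive(&my_queue, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!my_condition)
 *		schedule();
 *	finish_wait(&my_queue, &wait);
 *
 * prepare_to_wait_exclusive() sets WQ_FLAG_EXCLUSIVE and queues the
 * entry at the tail, so a wake_up() (nr_exclusive == 1) stops the scan
 * after waking one such sleeper instead of waking them all.
 */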
3594
3595/**
3596 * __wake_up - wake up threads blocked on a waitqueue.
3597 * @q: the waitqueue
3598 * @mode: which threads
3599 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3600 * @key: is directly passed to the wakeup function
3601 */
3602void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
3603                        int nr_exclusive, void *key)
3604{
3605        unsigned long flags;
3606
3607        spin_lock_irqsave(&q->lock, flags);
3608        __wake_up_common(q, mode, nr_exclusive, 0, key);
3609        spin_unlock_irqrestore(&q->lock, flags);
3610}
3611EXPORT_SYMBOL(__wake_up);
3612
3613/*
3614 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3615 */
3616void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
3617{
3618        __wake_up_common(q, mode, 1, 0, NULL);
3619}
3620
3621/**
3622 * __wake_up_sync - wake up threads blocked on a waitqueue.
3623 * @q: the waitqueue
3624 * @mode: which threads
3625 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3626 *
3627 * The sync wakeup differs in that the waker knows that it will schedule
3628 * away soon, so while the target thread will be woken up, it will not
3629 * be migrated to another CPU - i.e. the two threads are 'synchronized'
3630 * with each other. This can prevent needless bouncing between CPUs.
3631 *
3632 * On UP it can prevent extra preemption.
3633 */
3634void fastcall
3635__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3636{
3637        unsigned long flags;
3638        int sync = 1;
3639
3640        if (unlikely(!q))
3641                return;
3642
3643        if (unlikely(!nr_exclusive))
3644                sync = 0;
3645
3646        spin_lock_irqsave(&q->lock, flags);
3647        __wake_up_common(q, mode, nr_exclusive, sync, NULL);
3648        spin_unlock_irqrestore(&q->lock, flags);
3649}
3650EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
3651
3652void fastcall complete(struct completion *x)
3653{
3654        unsigned long flags;
3655
3656        spin_lock_irqsave(&x->wait.lock, flags);
3657        x->done++;
3658        __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
3659                         1, 0, NULL);
3660        spin_unlock_irqrestore(&x->wait.lock, flags);
3661}
3662EXPORT_SYMBOL(complete);
3663
3664void fastcall complete_all(struct completion *x)
3665{
3666        unsigned long flags;
3667
3668        spin_lock_irqsave(&x->wait.lock, flags);
3669        x->done += UINT_MAX/2;
3670        __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
3671                         0, 0, NULL);
3672        spin_unlock_irqrestore(&x->wait.lock, flags);
3673}
3674EXPORT_SYMBOL(complete_all);
3675
3676void fastcall __sched wait_for_completion(struct completion *x)
3677{
3678        might_sleep();
3679
3680        spin_lock_irq(&x->wait.lock);
3681        if (!x->done) {
3682                DECLARE_WAITQUEUE(wait, current);
3683
3684                wait.flags |= WQ_FLAG_EXCLUSIVE;
3685                __add_wait_queue_tail(&x->wait, &wait);
3686                do {
3687                        __set_current_state(TASK_UNINTERRUPTIBLE);
3688                        spin_unlock_irq(&x->wait.lock);
3689                        schedule();
3690                        spin_lock_irq(&x->wait.lock);
3691                } while (!x->done);
3692                __remove_wait_queue(&x->wait, &wait);
3693        }
3694        x->done--;
3695        spin_unlock_irq(&x->wait.lock);
3696}
3697EXPORT_SYMBOL(wait_for_completion);
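
/*
 * Typical completion usage, as a sketch (setup_done is a placeholder):
 *
 *	static DECLARE_COMPLETION(setup_done);
 *
 *	waiter:			wait_for_completion(&setup_done);
 *	worker, when finished:	complete(&setup_done);
 *
 * Because ->done is a counter rather than a flag, a complete() that
 * arrives before the matching wait_for_completion() is not lost.
 */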
3698
3699unsigned long fastcall __sched
3700wait_for_completion_timeout(struct completion *x, unsigned long timeout)
3701{
3702        might_sleep();
3703
3704        spin_lock_irq(&x->wait.lock);
3705        if (!x->done) {
3706                DECLARE_WAITQUEUE(wait, current);
3707
3708                wait.flags |= WQ_FLAG_EXCLUSIVE;
3709                __add_wait_queue_tail(&x->wait, &wait);
3710                do {
3711                        __set_current_state(TASK_UNINTERRUPTIBLE);
3712                        spin_unlock_irq(&x->wait.lock);
3713                        timeout = schedule_timeout(timeout);
3714                        spin_lock_irq(&x->wait.lock);
3715                        if (!timeout) {
3716                                __remove_wait_queue(&x->wait, &wait);
3717                                goto out;
3718                        }
3719                } while (!x->done);
3720                __remove_wait_queue(&x->wait, &wait);
3721        }
3722        x->done--;
3723out:
3724        spin_unlock_irq(&x->wait.lock);
3725        return timeout;
3726}
3727EXPORT_SYMBOL(wait_for_completion_timeout);
3728
3729int fastcall __sched wait_for_completion_interruptible(struct completion *x)
3730{
3731        int ret = 0;
3732
3733        might_sleep();
3734
3735        spin_lock_irq(&x->wait.lock);
3736        if (!x->done) {
3737                DECLARE_WAITQUEUE(wait, current);
3738
3739                wait.flags |= WQ_FLAG_EXCLUSIVE;
3740                __add_wait_queue_tail(&x->wait, &wait);
3741                do {
3742                        if (signal_pending(current)) {
3743                                ret = -ERESTARTSYS;
3744                                __remove_wait_queue(&x->wait, &wait);
3745                                goto out;
3746                        }
3747                        __set_current_state(TASK_INTERRUPTIBLE);
3748                        spin_unlock_irq(&x->wait.lock);
3749                        schedule();
3750                        spin_lock_irq(&x->wait.lock);
3751                } while (!x->done);
3752                __remove_wait_queue(&x->wait, &wait);
3753        }
3754        x->done--;
3755out:
3756        spin_unlock_irq(&x->wait.lock);
3757
3758        return ret;
3759}
3760EXPORT_SYMBOL(wait_for_completion_interruptible);
3761
3762unsigned long fastcall __sched
3763wait_for_completion_interruptible_timeout(struct completion *x,
3764                                          unsigned long timeout)
3765{
3766        might_sleep();
3767
3768        spin_lock_irq(&x->wait.lock);
3769        if (!x->done) {
3770                DECLARE_WAITQUEUE(wait, current);
3771
3772                wait.flags |= WQ_FLAG_EXCLUSIVE;
3773                __add_wait_queue_tail(&x->wait, &wait);
3774                do {
3775                        if (signal_pending(current)) {
3776                                timeout = -ERESTARTSYS;
3777                                __remove_wait_queue(&x->wait, &wait);
3778                                goto out;
3779                        }
3780                        __set_current_state(TASK_INTERRUPTIBLE);
3781                        spin_unlock_irq(&x->wait.lock);
3782                        timeout = schedule_timeout(timeout);
3783                        spin_lock_irq(&x->wait.lock);
3784                        if (!timeout) {
3785                                __remove_wait_queue(&x->wait, &wait);
3786                                goto out;
3787                        }
3788                } while (!x->done);
3789                __remove_wait_queue(&x->wait, &wait);
3790        }
3791        x->done--;
3792out:
3793        spin_unlock_irq(&x->wait.lock);
3794        return timeout;
3795}
3796EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
3797
3798
3799#define SLEEP_ON_VAR                                    \
3800        unsigned long flags;                            \
3801        wait_queue_t wait;                              \
3802        init_waitqueue_entry(&wait, current);
3803
3804#define SLEEP_ON_HEAD                                   \
3805        spin_lock_irqsave(&q->lock,flags);              \
3806        __add_wait_queue(q, &wait);                     \
3807        spin_unlock(&q->lock);
3808
3809#define SLEEP_ON_TAIL                                   \
3810        spin_lock_irq(&q->lock);                        \
3811        __remove_wait_queue(q, &wait);                  \
3812        spin_unlock_irqrestore(&q->lock, flags);
3813
3814void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
3815{
3816        SLEEP_ON_VAR
3817
3818        current->state = TASK_INTERRUPTIBLE;
3819
3820        SLEEP_ON_HEAD
3821        schedule();
3822        SLEEP_ON_TAIL
3823}
3824EXPORT_SYMBOL(interruptible_sleep_on);
3825
3826long fastcall __sched
3827interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3828{
3829        SLEEP_ON_VAR
3830
3831        current->state = TASK_INTERRUPTIBLE;
3832
3833        SLEEP_ON_HEAD
3834        timeout = schedule_timeout(timeout);
3835        SLEEP_ON_TAIL
3836
3837        return timeout;
3838}
3839EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3840
3841void fastcall __sched sleep_on(wait_queue_head_t *q)
3842{
3843        SLEEP_ON_VAR
3844
3845        current->state = TASK_UNINTERRUPTIBLE;
3846
3847        SLEEP_ON_HEAD
3848        schedule();
3849        SLEEP_ON_TAIL
3850}
3851EXPORT_SYMBOL(sleep_on);
3852
3853long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3854{
3855        SLEEP_ON_VAR
3856
3857        current->state = TASK_UNINTERRUPTIBLE;
3858
3859        SLEEP_ON_HEAD
3860        timeout = schedule_timeout(timeout);
3861        SLEEP_ON_TAIL
3862
3863        return timeout;
3864}
3865
3866EXPORT_SYMBOL(sleep_on_timeout);
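
/*
 * Note that the sleep_on() family gives the caller no way to re-test
 * its wakeup condition after being queued, so a wakeup arriving between
 * the caller's own condition check and SLEEP_ON_HEAD is lost.  A
 * wait_event()-style construction closes that window (my_queue and
 * my_condition are placeholders):
 *
 *	wait_event(my_queue, my_condition);
 */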
3867
3868#ifdef CONFIG_RT_MUTEXES
3869
3870/*
3871 * rt_mutex_setprio - set the current priority of a task
3872 * @p: task
3873 * @prio: prio value (kernel-internal form)
3874 *
3875 * This function changes the 'effective' priority of a task. It does
3876 * not touch ->normal_prio like __setscheduler().
3877 *
3878 * Used by the rt_mutex code to implement priority inheritance logic.
3879 */
3880void rt_mutex_setprio(struct task_struct *p, int prio)
3881{
3882        struct prio_array *array;
3883        unsigned long flags;
3884        struct rq *rq;
3885        int oldprio;
3886
3887        BUG_ON(prio < 0 || prio > MAX_PRIO);
3888
3889        rq = task_rq_lock(p, &flags);
3890
3891        oldprio = p->prio;
3892        array = p->array;
3893        if (array)
3894                dequeue_task(p, array);
3895        p->prio = prio;
3896
3897        if (array) {
3898                /*
3899                 * If changing to an RT priority then queue it
3900                 * in the active array!
3901                 */
3902                if (rt_task(p))
3903                        array = rq->active;
3904                enqueue_task(p, array);
3905                /*
3906                 * Reschedule if we are currently running on this runqueue and
3907                 * our priority decreased, or if we are not currently running on
3908                 * this runqueue and our priority is higher than the current's
3909                 */
3910                if (task_running(rq, p)) {
3911                        if (p->prio > oldprio)
3912                                resched_task(rq->curr);
3913                } else if (TASK_PREEMPTS_CURR(p, rq))
3914                        resched_task(rq->curr);
3915        }
3916        task_rq_unlock(rq, &flags);
3917}
3918
3919#endif
3920
3921void set_user_nice(struct task_struct *p, long nice)
3922{
3923        struct prio_array *array;
3924        int old_prio, delta;
3925        unsigned long flags;
3926        struct rq *rq;
3927
3928        if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3929                return;
3930        /*
3931         * We have to be careful: if called from sys_setpriority(),
3932         * the task might be in the middle of scheduling on another CPU.
3933         */
3934        rq = task_rq_lock(p, &flags);
3935        /*
3936         * The RT priorities are set via sched_setscheduler(), but we still
3937         * allow the 'normal' nice value to be set - but as expected
3938         * it won't have any effect on scheduling until the task
3939         * switches to SCHED_NORMAL/SCHED_BATCH:
3940         */
3941        if (has_rt_policy(p)) {
3942                p->static_prio = NICE_TO_PRIO(nice);
3943                goto out_unlock;
3944        }
3945        array = p->array;
3946        if (array) {
3947                dequeue_task(p, array);
3948                dec_raw_weighted_load(rq, p);
3949        }
3950
3951        p->static_prio = NICE_TO_PRIO(nice);
3952        set_load_weight(p);
3953        old_prio = p->prio;
3954        p->prio = effective_prio(p);
3955        delta = p->prio - old_prio;
3956
3957        if (array) {
3958                enqueue_task(p, array);
3959                inc_raw_weighted_load(rq, p);
3960                /*
3961                 * If the task increased its priority or is running and
3962                 * lowered its priority, then reschedule its CPU:
3963                 */
3964                if (delta < 0 || (delta > 0 && task_running(rq, p)))
3965                        resched_task(rq->curr);
3966        }
3967out_unlock:
3968        task_rq_unlock(rq, &flags);
3969}
3970EXPORT_SYMBOL(set_user_nice);
3971
3972/*
3973 * can_nice - check if a task can reduce its nice value
3974 * @p: task
3975 * @nice: nice value
3976 */
3977int can_nice(const struct task_struct *p, const int nice)
3978{
3979        /* convert nice value [19,-20] to rlimit style value [1,40] */
3980        int nice_rlim = 20 - nice;
3981
3982        return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
3983                capable(CAP_SYS_NICE));
3984}
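
/*
 * Worked example for can_nice(): a request for nice -5 converts to the
 * rlimit-style value 20 - (-5) = 25, so an unprivileged task needs an
 * RLIMIT_NICE current limit of at least 25 (or CAP_SYS_NICE) for the
 * change to be permitted.
 */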
3985
3986#ifdef __ARCH_WANT_SYS_NICE
3987
3988/*
3989 * sys_nice - change the priority of the current process.
3990 * @increment: priority increment
3991 *
3992 * sys_setpriority is a more generic, but much slower function that
3993 * does similar things.
3994 */
3995asmlinkage long sys_nice(int increment)
3996{
3997        long nice, retval;
3998
3999        /*
4000         * Setpriority might change our priority at the same moment.
4001         * We don't have to worry. Conceptually one call occurs first
4002         * and we have a single winner.
4003         */
4004        if (increment < -40)
4005                increment = -40;
4006        if (increment > 40)
4007                increment = 40;
4008
4009        nice = PRIO_TO_NICE(current->static_prio) + increment;
4010        if (nice < -20)
4011                nice = -20;
4012        if (nice > 19)
4013                nice = 19;
4014
4015        if (increment < 0 && !can_nice(current, nice))
4016                return -EPERM;
4017
4018        retval = security_task_setnice(current, nice);
4019        if (retval)
4020                return retval;
4021
4022        set_user_nice(current, nice);
4023        return 0;
4024}
4025
4026#endif
4027
4028/**
4029 * task_prio - return the priority value of a given task.
4030 * @p: the task in question.
4031 *
4032 * This is the priority value as seen by users in /proc.
4033 * RT tasks map to [-100 .. -1]; normal tasks map to
4034 * [0 .. 39], centered around 20 for a nice-0 task.
4035 */
4036int task_prio(const struct task_struct *p)
4037{
4038        return p->prio - MAX_RT_PRIO;
4039}
4040
4041/**
4042 * task_nice - return the nice value of a given task.
4043 * @p: the task in question.
4044 */
4045int task_nice(const struct task_struct *p)
4046{
4047        return TASK_NICE(p);
4048}
4049EXPORT_SYMBOL_GPL(task_nice);
4050
4051/**
4052 * idle_cpu - is a given cpu idle currently?
4053 * @cpu: the processor in question.
4054 */
4055int idle_cpu(int cpu)
4056{
4057        return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4058}
4059
4060/**
4061 * idle_task - return the idle task for a given cpu.
4062 * @cpu: the processor in question.
4063 */
4064struct task_struct *idle_task(int cpu)
4065{
4066        return cpu_rq(cpu)->idle;
4067}
4068
4069/**
4070 * find_process_by_pid - find a process with a matching PID value.
4071 * @pid: the pid in question.
4072 */
4073static inline struct task_struct *find_process_by_pid(pid_t pid)
4074{
4075        return pid ? find_task_by_pid(pid) : current;
4076}
4077
4078/* Actually do priority change: must hold rq lock. */
4079static void __setscheduler(struct task_struct *p, int policy, int prio)
4080{
4081        BUG_ON(p->array);
4082
4083        p->policy = policy;
4084        p->rt_priority = prio;
4085        p->normal_prio = normal_prio(p);
4086        /* we are holding p->pi_lock already */
4087        p->prio = rt_mutex_getprio(p);
4088        /*
4089         * SCHED_BATCH tasks are treated as perpetual CPU hogs:
4090         */
4091        if (policy == SCHED_BATCH)
4092                p->sleep_avg = 0;
4093        set_load_weight(p);
4094}
4095
4096/**
4097 * sched_setscheduler - change the scheduling policy and/or RT priority of
4098 * a thread.
4099 * @p: the task in question.
4100 * @policy: new policy.
4101 * @param: structure containing the new RT priority.
4102 *
4103 * NOTE: the task may already be dead
4104 */
4105int sched_setscheduler(struct task_struct *p, int policy,
4106                       struct sched_param *param)
4107{
4108        int retval, oldprio, oldpolicy = -1;
4109        struct prio_array *array;
4110        unsigned long flags;
4111        struct rq *rq;
4112
4113        /* may grab non-irq protected spin_locks */
4114        BUG_ON(in_interrupt());
4115recheck:
4116        /* double check policy once rq lock held */
4117        if (policy < 0)
4118                policy = oldpolicy = p->policy;
4119        else if (policy != SCHED_FIFO && policy != SCHED_RR &&
4120                        policy != SCHED_NORMAL && policy != SCHED_BATCH)
4121                return -EINVAL;
4122        /*
4123         * Valid priorities for SCHED_FIFO and SCHED_RR are
4124         * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
4125         * SCHED_BATCH is 0.
4126         */
4127        if (param->sched_priority < 0 ||
4128            (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4129            (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4130                return -EINVAL;
4131        if (is_rt_policy(policy) != (param->sched_priority != 0))
4132                return -EINVAL;
4133
4134        /*
4135         * Allow unprivileged RT tasks to decrease priority:
4136         */
4137        if (!capable(CAP_SYS_NICE)) {
4138                if (is_rt_policy(policy)) {
4139                        unsigned long rlim_rtprio;
4140                        unsigned long flags;
4141
4142                        if (!lock_task_sighand(p, &flags))
4143                                return -ESRCH;
4144                        rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
4145                        unlock_task_sighand(p, &flags);
4146
4147                        /* can't set/change the rt policy */
4148                        if (policy != p->policy && !rlim_rtprio)
4149                                return -EPERM;
4150
4151                        /* can't increase priority */
4152                        if (param->sched_priority > p->rt_priority &&
4153                            param->sched_priority > rlim_rtprio)
4154                                return -EPERM;
4155                }
4156
4157                /* can't change other user's priorities */
4158                if ((current->euid != p->euid) &&
4159                    (current->euid != p->uid))
4160                        return -EPERM;
4161        }
4162
4163        retval = security_task_setscheduler(p, policy, param);
4164        if (retval)
4165                return retval;
4166        /*
4167         * make sure no PI-waiters arrive (or leave) while we are
4168         * changing the priority of the task:
4169         */
4170        spin_lock_irqsave(&p->pi_lock, flags);
4171        /*
4172         * To be able to change p->policy safely, the appropriate
4173         * runqueue lock must be held.
4174         */
4175        rq = __task_rq_lock(p);
4176        /* recheck policy now with rq lock held */
4177        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4178                policy = oldpolicy = -1;
4179                __task_rq_unlock(rq);
4180                spin_unlock_irqrestore(&p->pi_lock, flags);
4181                goto recheck;
4182        }
4183        array = p->array;
4184        if (array)
4185                deactivate_task(p, rq);
4186        oldprio = p->prio;
4187        __setscheduler(p, policy, param->sched_priority);
4188        if (array) {
4189                __activate_task(p, rq);
4190                /*
4191                 * Reschedule if we are currently running on this runqueue and
4192                 * our priority decreased, or if we are not currently running on
4193                 * this runqueue and our priority is higher than the current's
4194                 */
4195                if (task_running(rq, p)) {
4196                        if (p->prio > oldprio)
4197                                resched_task(rq->curr);
4198                } else if (TASK_PREEMPTS_CURR(p, rq))
4199                        resched_task(rq->curr);
4200        }
4201        __task_rq_unlock(rq);
4202        spin_unlock_irqrestore(&p->pi_lock, flags);
4203
4204        rt_mutex_adjust_pi(p);
4205
4206        return 0;
4207}
4208EXPORT_SYMBOL_GPL(sched_setscheduler);
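
/*
 * In-kernel usage sketch (my_thread is a placeholder task pointer):
 *
 *	struct sched_param param = { .sched_priority = 1 };
 *
 *	if (sched_setscheduler(my_thread, SCHED_FIFO, &param) < 0)
 *		printk(KERN_WARNING "my_thread: cannot set SCHED_FIFO\n");
 *
 * Note the consistency rule enforced above: a nonzero sched_priority
 * is required for SCHED_FIFO/SCHED_RR and forbidden for
 * SCHED_NORMAL/SCHED_BATCH.
 */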
4209
4210static int
4211do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4212{
4213        struct sched_param lparam;
4214        struct task_struct *p;
4215        int retval;
4216
4217        if (!param || pid < 0)
4218                return -EINVAL;
4219        if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4220                return -EFAULT;
4221
4222        rcu_read_lock();
4223        retval = -ESRCH;
4224        p = find_process_by_pid(pid);
4225        if (p != NULL)
4226                retval = sched_setscheduler(p, policy, &lparam);
4227        rcu_read_unlock();
4228
4229        return retval;
4230}
4231
4232/**
4233 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4234 * @pid: the pid in question.
4235 * @policy: new policy.
4236 * @param: structure containing the new RT priority.
4237 */
4238asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
4239                                       struct sched_param __user *param)
4240{
4241        /* negative values for policy are not valid */
4242        if (policy < 0)
4243                return -EINVAL;
4244
4245        return do_sched_setscheduler(pid, policy, param);
4246}
4247
4248/**
4249 * sys_sched_setparam - set/change the RT priority of a thread
4250 * @pid: the pid in question.
4251 * @param: structure containing the new RT priority.
4252 */
4253asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
4254{
4255        return do_sched_setscheduler(pid, -1, param);
4256}
4257
4258/**
4259 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4260 * @pid: the pid in question.
4261 */
4262asmlinkage long sys_sched_getscheduler(pid_t pid)
4263{
4264        struct task_struct *p;
4265        int retval = -EINVAL;
4266
4267        if (pid < 0)
4268                goto out_nounlock;
4269
4270        retval = -ESRCH;
4271        read_lock(&tasklist_lock);
4272        p = find_process_by_pid(pid);
4273        if (p) {
4274                retval = security_task_getscheduler(p);
4275                if (!retval)
4276                        retval = p->policy;
4277        }
4278        read_unlock(&tasklist_lock);
4279
4280out_nounlock:
4281        return retval;
4282}
4283
4284/**
4285 * sys_sched_getparam - get the RT priority of a thread
4286 * @pid: the pid in question.
4287 * @param: structure containing the RT priority.
4288 */
4289asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
4290{
4291        struct sched_param lp;
4292        struct task_struct *p;
4293        int retval = -EINVAL;
4294
4295        if (!param || pid < 0)
4296                goto out_nounlock;
4297
4298        read_lock(&tasklist_lock);
4299        p = find_process_by_pid(pid);
4300        retval = -ESRCH;
4301        if (!p)
4302                goto out_unlock;
4303
4304        retval = security_task_getscheduler(p);
4305        if (retval)
4306                goto out_unlock;
4307
4308        lp.sched_priority = p->rt_priority;
4309        read_unlock(&tasklist_lock);
4310
4311        /*
4312         * This one might sleep; we cannot do it with a spinlock held ...
4313         */
4314        retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4315
4316out_nounlock:
4317        return retval;
4318
4319out_unlock:
4320        read_unlock(&tasklist_lock);
4321        return retval;
4322}
4323
4324long sched_setaffinity(pid_t pid, cpumask_t new_mask)
4325{
4326        cpumask_t cpus_allowed;
4327        struct task_struct *p;
4328        int retval;
4329
4330        lock_cpu_hotplug();
4331        read_lock(&tasklist_lock);
4332
4333        p = find_process_by_pid(pid);
4334        if (!p) {
4335                read_unlock(&tasklist_lock);
4336                unlock_cpu_hotplug();
4337                return -ESRCH;
4338        }
4339
4340        /*
4341         * It is not safe to call set_cpus_allowed with the
4342         * tasklist_lock held.  We will bump the task_struct's
4343         * usage count and then drop tasklist_lock.
4344         */
4345        get_task_struct(p);
4346        read_unlock(&tasklist_lock);
4347
4348        retval = -EPERM;
4349        if ((current->euid != p->euid) && (current->euid != p->uid) &&
4350                        !capable(CAP_SYS_NICE))
4351                goto out_unlock;
4352
4353        retval = security_task_setscheduler(p, 0, NULL);
4354        if (retval)
4355                goto out_unlock;
4356
4357        cpus_allowed = cpuset_cpus_allowed(p);
4358        cpus_and(new_mask, new_mask, cpus_allowed);
4359        retval = set_cpus_allowed(p, new_mask);
4360
4361out_unlock:
4362        put_task_struct(p);
4363        unlock_cpu_hotplug();
4364        return retval;
4365}
4366
4367static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4368                             cpumask_t *new_mask)
4369{
4370        if (len < sizeof(cpumask_t)) {
4371                memset(new_mask, 0, sizeof(cpumask_t));
4372        } else if (len > sizeof(cpumask_t)) {
4373                len = sizeof(cpumask_t);
4374        }
4375        return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4376}
4377
4378/**
4379 * sys_sched_setaffinity - set the cpu affinity of a process
4380 * @pid: pid of the process
4381 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4382 * @user_mask_ptr: user-space pointer to the new cpu mask
4383 */
4384asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
4385                                      unsigned long __user *user_mask_ptr)
4386{
4387        cpumask_t new_mask;
4388        int retval;
4389
4390        retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
4391        if (retval)
4392                return retval;
4393
4394        return sched_setaffinity(pid, new_mask);
4395}
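
/*
 * Matching user-space sketch for the affinity syscalls, pinning the
 * caller to CPU 0 via the glibc wrapper:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) < 0)
 *		perror("sched_setaffinity");
 *
 * A pid of 0 names the calling process, matching
 * find_process_by_pid() above.
 */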
4396
4397/*
4398 * Represents all CPUs present in the system.
4399 * In systems capable of hotplug, this map could dynamically grow
4400 * as new CPUs are detected in the system via any platform-specific
4401 * method, e.g. ACPI.
4402 */
4403
4404cpumask_t cpu_present_map __read_mostly;
4405EXPORT_SYMBOL(cpu_present_map);
4406
4407#ifndef CONFIG_SMP
4408cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
4409EXPORT_SYMBOL(cpu_online_map);
4410
4411cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
4412EXPORT_SYMBOL(cpu_possible_map);
4413#endif
4414
4415long sched_getaffinity(pid_t pid, cpumask_t *mask)
4416{
4417        struct task_struct *p;
4418        int retval;
4419
4420        lock_cpu_hotplug();
4421        read_lock(&tasklist_lock);
4422
4423        retval = -ESRCH;
4424        p = find_process_by_pid(pid);
4425        if (!p)
4426                goto out_unlock;
4427
4428        retval = security_task_getscheduler(p);
4429        if (retval)
4430                goto out_unlock;
4431
4432        cpus_and(*mask, p->cpus_allowed, cpu_online_map);
4433
4434out_unlock:
4435        read_unlock(&tasklist_lock);
4436        unlock_cpu_hotplug();
4437        if (retval)
4438                return retval;
4439
4440        return 0;
4441}
4442
4443/**
4444 * sys_sched_getaffinity - get the cpu affinity of a process
4445 * @pid: pid of the process
4446 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4447 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4448 */
4449asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
4450                                      unsigned long __user *user_mask_ptr)
4451{
4452        int ret;
4453        cpumask_t mask;
4454
4455        if (len < sizeof(cpumask_t))
4456                return -EINVAL;
4457
4458        ret = sched_getaffinity(pid, &mask);
4459        if (ret < 0)
4460                return ret;
4461
4462        if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
4463                return -EFAULT;
4464
4465        return sizeof(cpumask_t);
4466}
4467
4468/**
4469 * sys_sched_yield - yield the current processor to other threads.
4470 *
4471 * this function yields the current CPU by moving the calling thread
4472 * to the expired array. If there are no other threads running on this
4473 * CPU then this function will return.
4474 */
4475asmlinkage long sys_sched_yield(void)
4476{
4477        struct rq *rq = this_rq_lock();
4478        struct prio_array *array = current->array, *target = rq->expired;
4479
4480        schedstat_inc(rq, yld_cnt);
4481        /*
4482         * We implement yielding by moving the task into the expired
4483         * queue.
4484         *
4485         * (special rule: RT tasks will just round-robin in the active
4486         *  array.)
4487         */
4488        if (rt_task(current))
4489                target = rq->active;
4490
4491        if (array->nr_active == 1) {
4492                schedstat_inc(rq, yld_act_empty);
4493                if (!rq->expired->nr_active)
4494                        schedstat_inc(rq, yld_both_empty);
4495        } else if (!rq->expired->nr_active)
4496                schedstat_inc(rq, yld_exp_empty);
4497
4498        if (array != target) {
4499                dequeue_task(current, array);
4500                enqueue_task(current, target);
4501        } else
4502                /*
4503                 * requeue_task is cheaper so perform that if possible.
4504                 */
4505                requeue_task(current, array);
4506
4507        /*
4508         * Since we are going to call schedule() anyway, there's
4509         * no need to preempt or enable interrupts:
4510         */
4511        __release(rq->lock);
4512        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4513        _raw_spin_unlock(&rq->lock);
4514        preempt_enable_no_resched();
4515
4516        schedule();
4517
4518        return 0;
4519}
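
/*
 * Consequence of the expired-array rule above: a SCHED_NORMAL task that
 * yields generally will not run again until the other runnable tasks
 * have used up their timeslices and the arrays are switched, so a
 * user-space spin such as
 *
 *	while (!flag)
 *		sched_yield();
 *
 * can stall for a long time on this scheduler; RT tasks merely rotate
 * within the active array.
 */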
4520
4521static inline int __resched_legal(int expected_preempt_count)
4522{
4523        if (unlikely(preempt_count() != expected_preempt_count))
4524                return 0;
4525        if (unlikely(system_state != SYSTEM_RUNNING))
4526                return 0;
4527        return 1;
4528}
4529
4530static void __cond_resched(void)
4531{
4532#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4533        __might_sleep(__FILE__, __LINE__);
4534#endif
4535        /*
4536         * The BKS might be reacquired before we have dropped
4537         * PREEMPT_ACTIVE, which could trigger a second
4538         * cond_resched() call.
4539         */
4540        do {
4541                add_preempt_count(PREEMPT_ACTIVE);
4542                schedule();
4543                sub_preempt_count(PREEMPT_ACTIVE);
4544        } while (need_resched());
4545}
4546
4547int __sched cond_resched(void)
4548{
4549        if (need_resched() && __resched_legal(0)) {
4550                __cond_resched();
4551                return 1;
4552        }
4553        return 0;
4554}
4555EXPORT_SYMBOL(cond_resched);
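
/*
 * Usage sketch: a long-running loop in process context keeps scheduling
 * latency down by offering to reschedule on each iteration
 * (process_item() and nr_items are placeholders):
 *
 *	int i;
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */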
4556
4557/*
4558 * cond_resched_lock() - if a reschedule is pending, drop the given lock,
4559 * call schedule, and on return reacquire the lock.
4560 *
4561 * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
4562 * operations here to prevent schedule() from being called twice (once via
4563 * spin_unlock(), once by hand).
4564 */
4565int cond_resched_lock(spinlock_t *lock)
4566{
4567        int ret = 0;
4568
4569        if (need_lockbreak(lock)) {
4570                spin_unlock(lock);
4571                cpu_relax();
4572                ret = 1;
4573                spin_lock(lock);
4574        }
4575        if (need_resched() && __resched_legal(1)) {
4576                spin_release(&lock->dep_map, 1, _THIS_IP_);
4577                _raw_spin_unlock(lock);
4578                preempt_enable_no_resched();
4579                __cond_resched();
4580                ret = 1;
4581                spin_lock(lock);
4582        }
4583        return ret;
4584}
4585EXPORT_SYMBOL(cond_resched_lock);
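
/*
 * Usage sketch for cond_resched_lock(): walking a long list under a
 * spinlock without hogging the CPU (my_lock, my_list, pos and
 * examine() are placeholders).  A nonzero return means the lock was
 * dropped and reacquired, so the walk restarts because the list may
 * have changed meanwhile:
 *
 *	spin_lock(&my_lock);
 *	restart:
 *	list_for_each_entry(pos, &my_list, list) {
 *		if (cond_resched_lock(&my_lock))
 *			goto restart;
 *		examine(pos);
 *	}
 *	spin_unlock(&my_lock);
 */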
4586
4587int __sched cond_resched_softirq(void)
4588{
4589        BUG_ON(!in_softirq());
4590
4591        if (need_resched() && __resched_legal(0)) {
4592                raw_local_irq_disable();
4593                _local_bh_enable();
4594                raw_local_irq_enable();
4595                __cond_resched();
4596                local_bh_disable();
4597                return 1;
4598        }
4599        return 0;
4600}
4601EXPORT_SYMBOL(cond_resched_softirq);
4602
4603/**
4604 * yield - yield the current processor to other threads.
4605 *
4606 * this is a shortcut for kernel-space yielding - it marks the
4607 * thread runnable and calls sys_sched_yield().
4608 */
4609void __sched yield(void)
4610{
4611        set_current_state(TASK_RUNNING);
4612        sys_sched_yield();
4613}
4614EXPORT_SYMBOL(yield);
4615
4616/*
4617 * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
4618 * that process accounting knows that this is a task in IO wait state.
4619 *
4620 * But don't do that if it is a deliberate, throttling IO wait (this task
4621 * has set its backing_dev_info: the queue against which it should throttle)
4622 */
4623void __sched io_schedule(void)
4624{
4625        struct rq *rq = &__raw_get_cpu_var(runqueues);
4626
4627        delayacct_blkio_start();
4628        atomic_inc(&rq->nr_iowait);
4629        schedule();
4630        atomic_dec(&rq->nr_iowait);
4631        delayacct_blkio_end();
4632}
4633EXPORT_SYMBOL(io_schedule);
4634
4635long __sched io_schedule_timeout(long timeout)
4636{
4637        struct rq *rq = &__raw_get_cpu_var(runqueues);
4638        long ret;
4639
4640        delayacct_blkio_start();
4641        atomic_inc(&rq->nr_iowait);
4642        ret = schedule_timeout(timeout);
4643        atomic_dec(&rq->nr_iowait);
4644        delayacct_blkio_end();
4645        return ret;
4646}
4647
4648/**
4649 * sys_sched_get_priority_max - return maximum RT priority.
4650 * @policy: scheduling class.
4651 *
4652 * this syscall returns the maximum rt_priority that can be used
4653 * by a given scheduling class.
4654 */
4655asmlinkage long sys_sched_get_priority_max(int policy)
4656{
4657        int ret = -EINVAL;
4658
4659        switch (policy) {
4660        case SCHED_FIFO:
4661        case SCHED_RR:
4662                ret = MAX_USER_RT_PRIO-1;
4663                break;
4664        case SCHED_NORMAL:
4665        case SCHED_BATCH:
4666                ret = 0;
4667                break;
4668        }
4669        return ret;
4670}
4671
4672/**
4673 * sys_sched_get_priority_min - return minimum RT priority.
4674 * @policy: scheduling class.
4675 *
4676 * this syscall returns the minimum rt_priority that can be used
4677 * by a given scheduling class.
4678 */
4679asmlinkage long sys_sched_get_priority_min(int policy)
4680{
4681        int ret = -EINVAL;
4682
4683        switch (policy) {
4684        case SCHED_FIFO:
4685        case SCHED_RR:
4686                ret = 1;
4687                break;
4688        case SCHED_NORMAL:
4689        case SCHED_BATCH:
4690                ret = 0;
4691        }
4692        return ret;
4693}
4694
4695/**
4696 * sys_sched_rr_get_interval - return the default timeslice of a process.
4697 * @pid: pid of the process.
4698 * @interval: userspace pointer to the timeslice value.
4699 *
4700 * this syscall writes the default timeslice value of a given process
4701 * into the user-space timespec buffer. A value of '0' means infinity.
4702 */
4703asmlinkage
4704long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
4705{
4706        struct task_struct *p;
4707        int retval = -EINVAL;
4708        struct timespec t;
4709
4710        if (pid < 0)
4711                goto out_nounlock;
4712
4713        retval = -ESRCH;
4714        read_lock(&tasklist_lock);
4715        p = find_process_by_pid(pid);
4716        if (!p)
4717                goto out_unlock;
4718
4719        retval = security_task_getscheduler(p);
4720        if (retval)
4721                goto out_unlock;
4722
4723        jiffies_to_timespec(p->policy == SCHED_FIFO ?
4724                                0 : task_timeslice(p), &t);
4725        read_unlock(&tasklist_lock);
4726        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4727out_nounlock:
4728        return retval;
4729out_unlock:
4730        read_unlock(&tasklist_lock);
4731        return retval;
4732}
4733
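/*
 * The matching userspace wrapper is sched_rr_get_interval(); a pid of
 * 0 queries the calling process.  A minimal userspace illustration:
 */
#if 0	/* userspace example, shown for illustration only */
#include <stdio.h>
#include <sched.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif
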
4734static inline struct task_struct *eldest_child(struct task_struct *p)
4735{
4736        if (list_empty(&p->children))
4737                return NULL;
4738        return list_entry(p->children.next, struct task_struct, sibling);
4739}
4740
4741static inline struct task_struct *older_sibling(struct task_struct *p)
4742{
4743        if (p->sibling.prev == &p->parent->children)
4744                return NULL;
4745        return list_entry(p->sibling.prev, struct task_struct, sibling);
4746}
4747
4748static inline struct task_struct *younger_sibling(struct task_struct *p)
4749{
4750        if (p->sibling.next == &p->parent->children)
4751                return NULL;
4752        return list_entry(p->sibling.next, struct task_struct, sibling);
4753}
4754
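/*
 * The three helpers above navigate p->children, a list whose elements
 * are linked through each child's ->sibling member.  The same walk
 * can be written with the generic list iterator; a minimal sketch
 * (the caller must hold tasklist_lock):
 */
#if 0	/* illustrative sketch, not part of this file */
	struct task_struct *child;

	list_for_each_entry(child, &p->children, sibling)
		printk("child pid %d\n", child->pid);
#endif
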
4755static const char stat_nam[] = "RSDTtZX";
4756
4757static void show_task(struct task_struct *p)
4758{
4759        struct task_struct *relative;
4760        unsigned long free = 0;
4761        unsigned state;
4762
4763        state = p->state ? __ffs(p->state) + 1 : 0;
4764        printk("%-13.13s %c", p->comm,
4765                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4766#if (BITS_PER_LONG == 32)
4767        if (state == TASK_RUNNING)
4768                printk(" running ");
4769        else
4770                printk(" %08lX ", thread_saved_pc(p));
4771#else
4772        if (state == TASK_RUNNING)
4773                printk("  running task   ");
4774        else
4775                printk(" %016lx ", thread_saved_pc(p));
4776#endif
4777#ifdef CONFIG_DEBUG_STACK_USAGE
4778        {
4779                unsigned long *n = end_of_stack(p);
4780                while (!*n)
4781                        n++;
4782                free = (unsigned long)n - (unsigned long)end_of_stack(p);
4783        }
4784#endif
4785        printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
4786        if ((relative = eldest_child(p)))
4787                printk("%5d ", relative->pid);
4788        else
4789                printk("      ");
4790        if ((relative = younger_sibling(p)))
4791                printk("%7d", relative->pid);
4792        else
4793                printk("       ");
4794        if ((relative = older_sibling(p)))
4795                printk(" %5d", relative->pid);
4796        else
4797                printk("      ");
4798        if (!p->mm)
4799                printk(" (L-TLB)\n");
4800        else
4801                printk(" (NOTLB)\n");
4802
4803        if (state != TASK_RUNNING)
4804                show_stack(p, NULL);
4805}
4806
4807void show_state(void)
4808{
4809        struct task_struct *g, *p;
4810
4811#if (BITS_PER_LONG == 32)
4812        printk("\n"
4813               "                                               sibling\n");
4814        printk("  task             PC      pid father child younger older\n");
4815#else
4816        printk("\n"
4817               "                                                       sibling\n");
4818        printk("  task                 PC          pid father child younger older\n");
4819#endif
4820        read_lock(&tasklist_lock);
4821        do_each_thread(g, p) {
4822                /*
4823                 * reset the NMI-timeout: listing all tasks on a slow
4824                 * console might take a lot of time.
4825                 */
4826                touch_nmi_watchdog();
4827                show_task(p);
4828        } while_each_thread(g, p);
4829
4830        read_unlock(&tasklist_lock);
4831        debug_show_all_locks();
4832}
4833
4834/**
4835 * init_idle - set up an idle thread for a given CPU
4836 * @idle: task in question
4837 * @cpu: cpu the idle task belongs to
4838 *
4839 * NOTE: this function does not set the idle thread's NEED_RESCHED
4840 * flag, to make booting more robust.
4841 */
4842void __cpuinit init_idle(struct task_struct *idle, int cpu)
4843{
4844        struct rq *rq = cpu_rq(cpu);
4845        unsigned long flags;
4846
4847        idle->timestamp = sched_clock();
4848        idle->sleep_avg = 0;
4849        idle->array = NULL;
4850        idle->prio = idle->normal_prio = MAX_PRIO;
4851        idle->state = TASK_RUNNING;
4852        idle->cpus_allowed = cpumask_of_cpu(cpu);
4853        set_task_cpu(idle, cpu);
4854
4855        spin_lock_irqsave(&rq->lock, flags);
4856        rq->curr = rq->idle = idle;
4857#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
4858        idle->oncpu = 1;
4859#endif
4860        spin_unlock_irqrestore(&rq->lock, flags);
4861
4862        /* Set the preempt count _outside_ the spinlocks! */
4863#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
4864        task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
4865#else
4866        task_thread_info(idle)->preempt_count = 0;
4867#endif
4868}
4869
4870/*
4871 * On a system that switches off the HZ timer, nohz_cpu_mask
4872 * indicates which cpus have entered this state.  This is used
4873 * by the RCU update code to wait only for active cpus.  On
4874 * systems which do not switch off the HZ timer, it should
4875 * always be CPU_MASK_NONE.
4876 */
4877cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
4878
4879#ifdef CONFIG_SMP
4880/*
4881 * This is how migration works:
4882 *
4883 * 1) we queue a struct migration_req structure in the source CPU's
4884 *    runqueue and wake up that CPU's migration thread.
4885 * 2) we wait for the request's completion => the caller blocks.
4886 * 3) migration thread wakes up (implicitly it forces the migrated
4887 *    thread off the CPU)
4888 * 4) it gets the migration request and checks whether the migrated
4889 *    task is still in the wrong runqueue.
4890 * 5) if it's in the wrong runqueue then the migration thread removes
4891 *    it and puts it into the right queue.
4892 * 6) the migration thread signals the request's completion.
4893 * 7) we wake up and the migration is done.
4894 */
4895
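/*
 * For reference, the request queued in step 1 (struct migration_req,
 * defined earlier in this file) looks roughly like this:
 */
#if 0	/* illustrative reminder, not a second definition */
struct migration_req {
	struct list_head list;		/* entry in rq->migration_queue */
	struct task_struct *task;	/* the task to be moved */
	int dest_cpu;			/* where to move it */
	struct completion done;	/* signalled when the move is done */
};
#endif
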
4896/*
4897 * Change a given task's CPU affinity. Migrate the thread to a
4898 * proper CPU and schedule it away if the CPU it's executing on
4899 * is removed from the allowed bitmask.
4900 *
4901 * NOTE: the caller must have a valid reference to the task, the
4902 * task must not exit() & deallocate itself prematurely.  The
4903 * call is not atomic; no spinlocks may be held.
4904 */
4905int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
4906{
4907        struct migration_req req;
4908        unsigned long flags;
4909        struct rq *rq;
4910        int ret = 0;
4911
4912        rq = task_rq_lock(p, &flags);
4913        if (!cpus_intersects(new_mask, cpu_online_map)) {
4914                ret = -EINVAL;
4915                goto out;
4916        }
4917
4918        p->cpus_allowed = new_mask;
4919        /* Can the task run on the task's current CPU? If so, we're done */
4920        if (cpu_isset(task_cpu(p), new_mask))
4921                goto out;
4922
4923        if (migrate_task(p, any_online_cpu(new_mask), &req)) {
4924                /* Need help from migration thread: drop lock and wait. */
4925                task_rq_unlock(rq, &flags);
4926                wake_up_process(rq->migration_thread);
4927                wait_for_completion(&req.done);
4928                tlb_migrate_finish(p->mm);
4929                return 0;
4930        }
4931out:
4932        task_rq_unlock(rq, &flags);
4933
4934        return ret;
4935}
4936EXPORT_SYMBOL_GPL(set_cpus_allowed);
4937
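/*
 * A common use of set_cpus_allowed() is pinning a kernel thread to a
 * single CPU.  A minimal sketch, assuming a hypothetical helper that
 * a driver might call from the thread itself:
 */
#if 0	/* illustrative sketch, not part of this file */
static void pin_current_to_cpu(int cpu)
{
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)) < 0)
		printk(KERN_WARNING "cannot pin to cpu%d\n", cpu);
}
#endif
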
4938/*
4939 * Move a (non-current) task off this cpu, onto the dest cpu.  We're
4940 * doing this because either it can't run here any more (it has been
4941 * set_cpus_allowed() away from this CPU, or this CPU is going down),
4942 * or because we're attempting to rebalance this task on exec (sched_exec).
4943 *
4944 * So we race with normal scheduler movements, but that's OK, as long
4945 * as the task is no longer on this CPU.
4946 *
4947 * Returns non-zero if task was successfully migrated.
4948 */
4949static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4950{
4951        struct rq *rq_dest, *rq_src;
4952        int ret = 0;
4953
4954        if (unlikely(cpu_is_offline(dest_cpu)))
4955                return ret;
4956
4957        rq_src = cpu_rq(src_cpu);
4958        rq_dest = cpu_rq(dest_cpu);
4959
4960        double_rq_lock(rq_src, rq_dest);
4961        /* Already moved. */
4962        if (task_cpu(p) != src_cpu)
4963                goto out;
4964        /* Affinity changed (again). */
4965        if (!cpu_isset(dest_cpu, p->cpus_allowed))
4966                goto out;
4967
4968        set_task_cpu(p, dest_cpu);
4969        if (p->array) {
4970                /*
4971                 * Sync timestamp with rq_dest's before activating.
4972                 * The same thing could be achieved by doing this step
4973                 * afterwards, and pretending it was a local activate.
4974                 * This way is cleaner and logically correct.
4975                 */
4976                p->timestamp = p->timestamp - rq_src->timestamp_last_tick
4977                                + rq_dest->timestamp_last_tick;
4978                deactivate_task(p, rq_src);
4979                __activate_task(p, rq_dest);
4980                if (TASK_PREEMPTS_CURR(p, rq_dest))
4981                        resched_task(rq_dest->curr);
4982        }
4983        ret = 1;
4984out:
4985        double_rq_unlock(rq_src, rq_dest);
4986        return ret;
4987}
4988
4989/*
4990 * migration_thread - a high-priority system thread that performs
4991 * thread migration by bumping a thread off its CPU and 'pushing'
4992 * it onto another runqueue.
4993 */
4994static int migration_thread(void *data)
4995{
4996        int cpu = (long)data;
4997        struct rq *rq;
4998
4999        rq = cpu_rq(cpu);
5000        BUG_ON(rq->migration_thread != current);
5001
5002        set_current_state(TASK_INTERRUPTIBLE);
5003        while (!kthread_should_stop()) {
5004                struct migration_req *req;
5005                struct list_head *head;
5006
5007                try_to_freeze();
5008
5009                spin_lock_irq(&rq->lock);
5010
5011                if (cpu_is_offline(cpu)) {
5012                        spin_unlock_irq(&rq->lock);
5013                        goto wait_to_die;
5014                }
5015
5016                if (rq->active_balance) {
5017                        active_load_balance(rq, cpu);
5018                        rq->active_balance = 0;
5019                }
5020
5021                head = &rq->migration_queue;
5022
5023                if (list_empty(head)) {
5024                        spin_unlock_irq(&rq->lock);
5025                        schedule();
5026                        set_current_state(TASK_INTERRUPTIBLE);
5027                        continue;
5028                }
5029                req = list_entry(head->next, struct migration_req, list);
5030                list_del_init(head->next);
5031
5032                spin_unlock(&rq->lock);
5033                __migrate_task(req->task, cpu, req->dest_cpu);
5034                local_irq_enable();
5035
5036                complete(&req->done);
5037        }
5038        __set_current_state(TASK_RUNNING);
5039        return 0;
5040
5041wait_to_die:
5042        /* Wait for kthread_stop */
5043        set_current_state(TASK_INTERRUPTIBLE);
5044        while (!kthread_should_stop()) {
5045                schedule();
5046                set_current_state(TASK_INTERRUPTIBLE);
5047        }
5048        __set_current_state(TASK_RUNNING);
5049        return 0;
5050}
5051
5052#ifdef CONFIG_HOTPLUG_CPU
5053/* Figure out where a task on the dead CPU should go; use force if necessary. */
5054static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5055{
5056        unsigned long flags;
5057        cpumask_t mask;
5058        struct rq *rq;
5059        int dest_cpu;
5060
5061restart:
5062        /* On same node? */
5063        mask = node_to_cpumask(cpu_to_node(dead_cpu));
5064        cpus_and(mask, mask, p->cpus_allowed);
5065        dest_cpu = any_online_cpu(mask);
5066
5067        /* On any allowed CPU? */
5068        if (dest_cpu == NR_CPUS)
5069                dest_cpu = any_online_cpu(p->cpus_allowed);
5070
5071        /* No more Mr. Nice Guy. */
5072        if (dest_cpu == NR_CPUS) {
5073                rq = task_rq_lock(p, &flags);
5074                cpus_setall(p->cpus_allowed);
5075                dest_cpu = any_online_cpu(p->cpus_allowed);
5076                task_rq_unlock(rq, &flags);
5077
5078                /*
5079                 * Don't tell them about moving exiting tasks or
5080                 * kernel threads (both mm NULL), since they never
5081                 * leave kernel.
5082                 * leave the kernel.
5083                if (p->mm && printk_ratelimit())
5084                        printk(KERN_INFO "process %d (%s) no "
5085                               "longer affine to cpu%d\n",
5086                               p->pid, p->comm, dead_cpu);
5087        }
5088        if (!__migrate_task(p, dead_cpu, dest_cpu))
5089                goto restart;
5090}
5091
5092/*
5093 * While a dead CPU has no uninterruptible tasks queued at this point,
5094 * it might still have a nonzero ->nr_uninterruptible counter, because
5095 * for performance reasons the counter is not strictly tracking tasks to
5096 * their home CPUs. So we just add the counter to another CPU's counter,
5097 * to keep the global sum constant after CPU-down:
5098 */
5099static void migrate_nr_uninterruptible(struct rq *rq_src)
5100{
5101        struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
5102        unsigned long flags;
5103
5104        local_irq_save(flags);
5105        double_rq_lock(rq_src, rq_dest);
5106        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5107        rq_src->nr_uninterruptible = 0;
5108        double_rq_unlock(rq_src, rq_dest);
5109        local_irq_restore(flags);
5110}
5111
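/*
 * Example: if the dead CPU left nr_uninterruptible == 2 behind and
 * the chosen online CPU is currently at -2 (the per-CPU counters may
 * individually go negative; only their sum is meaningful), the
 * transfer leaves the target at 0 and the global sum unchanged.
 */
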
5112/* Run through task list and migrate tasks from the dead cpu. */
5113static void migrate_live_tasks(int src_cpu)
5114{
5115        struct task_struct *p, *t;
5116
5117        write_lock_irq(&tasklist_lock);
5118
5119        do_each_thread(t, p) {
5120                if (p == current)
5121