linux/kernel/sched_fair.c
   1/*
   2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
   3 *
   4 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   5 *
   6 *  Interactivity improvements by Mike Galbraith
   7 *  (C) 2007 Mike Galbraith <efault@gmx.de>
   8 *
   9 *  Various enhancements by Dmitry Adamushko.
  10 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
  11 *
  12 *  Group scheduling enhancements by Srivatsa Vaddagiri
  13 *  Copyright IBM Corporation, 2007
  14 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  15 *
  16 *  Scaled math optimizations by Thomas Gleixner
  17 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  18 *
  19 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  20 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  21 */
  22
  23#include <linux/latencytop.h>
  24
  25/*
  26 * Targeted preemption latency for CPU-bound tasks:
  27 * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
  28 *
  29 * NOTE: this latency value is not the same as the concept of
  30 * 'timeslice length' - timeslices in CFS are of variable length
  31 * and have no persistent notion like in traditional, time-slice
  32 * based scheduling concepts.
  33 *
  34 * (to see the precise effective timeslice length of your workload,
  35 *  run vmstat and monitor the context-switches (cs) field)
  36 */
  37unsigned int sysctl_sched_latency = 20000000ULL;
  38
  39/*
  40 * Minimal preemption granularity for CPU-bound tasks:
  41 * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
  42 */
  43unsigned int sysctl_sched_min_granularity = 4000000ULL;
  44
  45/*
   46 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  47 */
  48static unsigned int sched_nr_latency = 5;
  49
  50/*
  51 * After fork, child runs first. (default) If set to 0 then
  52 * parent will (try to) run first.
  53 */
  54const_debug unsigned int sysctl_sched_child_runs_first = 1;
  55
  56/*
  57 * sys_sched_yield() compat mode
  58 *
   59 * This option switches the aggressive yield implementation of the
  60 * old scheduler back on.
  61 */
  62unsigned int __read_mostly sysctl_sched_compat_yield;
  63
  64/*
  65 * SCHED_OTHER wake-up granularity.
  66 * (default: 5 msec * (1 + ilog(ncpus)), units: nanoseconds)
  67 *
  68 * This option delays the preemption effects of decoupled workloads
  69 * and reduces their over-scheduling. Synchronous workloads will still
  70 * have immediate wakeup/sleep latencies.
  71 */
  72unsigned int sysctl_sched_wakeup_granularity = 5000000UL;
  73
  74const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  75
  76static const struct sched_class fair_sched_class;
  77
  78/**************************************************************
  79 * CFS operations on generic schedulable entities:
  80 */
  81
  82static inline struct task_struct *task_of(struct sched_entity *se)
  83{
  84        return container_of(se, struct task_struct, se);
  85}
  86
  87#ifdef CONFIG_FAIR_GROUP_SCHED
  88
  89/* cpu runqueue to which this cfs_rq is attached */
  90static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
  91{
  92        return cfs_rq->rq;
  93}
  94
  95/* An entity is a task if it doesn't "own" a runqueue */
  96#define entity_is_task(se)      (!se->my_q)
  97
  98/* Walk up scheduling entities hierarchy */
  99#define for_each_sched_entity(se) \
 100                for (; se; se = se->parent)
 101
 102static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 103{
 104        return p->se.cfs_rq;
 105}
 106
 107/* runqueue on which this entity is (to be) queued */
 108static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 109{
 110        return se->cfs_rq;
 111}
 112
 113/* runqueue "owned" by this group */
 114static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 115{
 116        return grp->my_q;
 117}
 118
 119/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 120 * another cpu ('this_cpu')
 121 */
 122static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 123{
 124        return cfs_rq->tg->cfs_rq[this_cpu];
 125}
 126
  127/* Iterate through all leaf cfs_rq's on a runqueue */
 128#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 129        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 130
 131/* Do the two (enqueued) entities belong to the same group ? */
 132static inline int
 133is_same_group(struct sched_entity *se, struct sched_entity *pse)
 134{
 135        if (se->cfs_rq == pse->cfs_rq)
 136                return 1;
 137
 138        return 0;
 139}
 140
 141static inline struct sched_entity *parent_entity(struct sched_entity *se)
 142{
 143        return se->parent;
 144}
 145
 146/* return depth at which a sched entity is present in the hierarchy */
 147static inline int depth_se(struct sched_entity *se)
 148{
 149        int depth = 0;
 150
 151        for_each_sched_entity(se)
 152                depth++;
 153
 154        return depth;
 155}
 156
 157static void
 158find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 159{
 160        int se_depth, pse_depth;
 161
 162        /*
  163         * A preemption test can be made between sibling entities that are in
  164         * the same cfs_rq, i.e. that have a common parent. Walk up the
  165         * hierarchy of both tasks until we find ancestors that are siblings,
  166         * i.e. children of a common parent.
 167         */
 168
 169        /* First walk up until both entities are at same depth */
 170        se_depth = depth_se(*se);
 171        pse_depth = depth_se(*pse);
 172
 173        while (se_depth > pse_depth) {
 174                se_depth--;
 175                *se = parent_entity(*se);
 176        }
 177
 178        while (pse_depth > se_depth) {
 179                pse_depth--;
 180                *pse = parent_entity(*pse);
 181        }
 182
 183        while (!is_same_group(*se, *pse)) {
 184                *se = parent_entity(*se);
 185                *pse = parent_entity(*pse);
 186        }
 187}
 188
 189#else   /* CONFIG_FAIR_GROUP_SCHED */
 190
 191static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 192{
 193        return container_of(cfs_rq, struct rq, cfs);
 194}
 195
 196#define entity_is_task(se)      1
 197
 198#define for_each_sched_entity(se) \
 199                for (; se; se = NULL)
 200
 201static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 202{
 203        return &task_rq(p)->cfs;
 204}
 205
 206static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 207{
 208        struct task_struct *p = task_of(se);
 209        struct rq *rq = task_rq(p);
 210
 211        return &rq->cfs;
 212}
 213
 214/* runqueue "owned" by this group */
 215static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 216{
 217        return NULL;
 218}
 219
 220static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 221{
 222        return &cpu_rq(this_cpu)->cfs;
 223}
 224
 225#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 226                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 227
 228static inline int
 229is_same_group(struct sched_entity *se, struct sched_entity *pse)
 230{
 231        return 1;
 232}
 233
 234static inline struct sched_entity *parent_entity(struct sched_entity *se)
 235{
 236        return NULL;
 237}
 238
 239static inline void
 240find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 241{
 242}
 243
 244#endif  /* CONFIG_FAIR_GROUP_SCHED */
 245
 246
 247/**************************************************************
 248 * Scheduling class tree data structure manipulation methods:
 249 */
 250
 251static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
 252{
 253        s64 delta = (s64)(vruntime - min_vruntime);
 254        if (delta > 0)
 255                min_vruntime = vruntime;
 256
 257        return min_vruntime;
 258}
 259
 260static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 261{
 262        s64 delta = (s64)(vruntime - min_vruntime);
 263        if (delta < 0)
 264                min_vruntime = vruntime;
 265
 266        return min_vruntime;
 267}
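
     /*
      * Both helpers above compare vruntimes via a signed difference instead
      * of comparing the raw u64 values, so they remain correct when the
      * virtual clock wraps. Worked example: with min_vruntime == 2^64 - 100
      * and vruntime wrapped around to 50, (u64)(vruntime - min_vruntime)
      * is 150 and the cast to s64 gives +150 > 0, so vruntime is still
      * treated as the later value. This holds as long as the two values
      * stay within 2^63 of each other.
      */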
 268
 269static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
 270{
 271        return se->vruntime - cfs_rq->min_vruntime;
 272}
 273
 274static void update_min_vruntime(struct cfs_rq *cfs_rq)
 275{
 276        u64 vruntime = cfs_rq->min_vruntime;
 277
 278        if (cfs_rq->curr)
 279                vruntime = cfs_rq->curr->vruntime;
 280
 281        if (cfs_rq->rb_leftmost) {
 282                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 283                                                   struct sched_entity,
 284                                                   run_node);
 285
 286                if (!cfs_rq->curr)
 287                        vruntime = se->vruntime;
 288                else
 289                        vruntime = min_vruntime(vruntime, se->vruntime);
 290        }
 291
 292        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 293}
 294
 295/*
 296 * Enqueue an entity into the rb-tree:
 297 */
 298static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 299{
 300        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
 301        struct rb_node *parent = NULL;
 302        struct sched_entity *entry;
 303        s64 key = entity_key(cfs_rq, se);
 304        int leftmost = 1;
 305
 306        /*
 307         * Find the right place in the rbtree:
 308         */
 309        while (*link) {
 310                parent = *link;
 311                entry = rb_entry(parent, struct sched_entity, run_node);
 312                /*
  313                 * We don't care about collisions. Nodes with
 314                 * the same key stay together.
 315                 */
 316                if (key < entity_key(cfs_rq, entry)) {
 317                        link = &parent->rb_left;
 318                } else {
 319                        link = &parent->rb_right;
 320                        leftmost = 0;
 321                }
 322        }
 323
 324        /*
 325         * Maintain a cache of leftmost tree entries (it is frequently
 326         * used):
 327         */
 328        if (leftmost)
 329                cfs_rq->rb_leftmost = &se->run_node;
 330
 331        rb_link_node(&se->run_node, parent, link);
 332        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 333}
 334
 335static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 336{
 337        if (cfs_rq->rb_leftmost == &se->run_node) {
 338                struct rb_node *next_node;
 339
 340                next_node = rb_next(&se->run_node);
 341                cfs_rq->rb_leftmost = next_node;
 342        }
 343
 344        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 345}
 346
 347static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
 348{
 349        struct rb_node *left = cfs_rq->rb_leftmost;
 350
 351        if (!left)
 352                return NULL;
 353
 354        return rb_entry(left, struct sched_entity, run_node);
 355}
 356
 357static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 358{
 359        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 360
 361        if (!last)
 362                return NULL;
 363
 364        return rb_entry(last, struct sched_entity, run_node);
 365}
 366
 367/**************************************************************
 368 * Scheduling class statistics methods:
 369 */
 370
 371#ifdef CONFIG_SCHED_DEBUG
 372int sched_nr_latency_handler(struct ctl_table *table, int write,
 373                struct file *filp, void __user *buffer, size_t *lenp,
 374                loff_t *ppos)
 375{
 376        int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
 377
 378        if (ret || !write)
 379                return ret;
 380
 381        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
 382                                        sysctl_sched_min_granularity);
 383
 384        return 0;
 385}
 386#endif
 387
 388/*
 389 * delta /= w
 390 */
 391static inline unsigned long
 392calc_delta_fair(unsigned long delta, struct sched_entity *se)
 393{
 394        if (unlikely(se->load.weight != NICE_0_LOAD))
 395                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 396
 397        return delta;
 398}
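
     /*
      * In other words, calc_delta_fair() returns delta * NICE_0_LOAD / weight
      * (with NICE_0_LOAD being 1024). For example, a nice-0 entity takes the
      * fast path and sees delta unchanged, while an entity of weight 2048
      * gets delta/2 - heavier entities accumulate vruntime at half the
      * wall-clock rate and therefore receive proportionally more CPU time.
      */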
 399
 400/*
 401 * The idea is to set a period in which each task runs once.
 402 *
  403 * When there are too many tasks (sched_nr_latency) we have to stretch
 404 * this period because otherwise the slices get too small.
 405 *
 406 * p = (nr <= nl) ? l : l*nr/nl
 407 */
 408static u64 __sched_period(unsigned long nr_running)
 409{
 410        u64 period = sysctl_sched_latency;
 411        unsigned long nr_latency = sched_nr_latency;
 412
 413        if (unlikely(nr_running > nr_latency)) {
 414                period = sysctl_sched_min_granularity;
 415                period *= nr_running;
 416        }
 417
 418        return period;
 419}
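
     /*
      * For example, with the unscaled (single CPU) defaults above - latency
      * 20ms, min granularity 4ms, sched_nr_latency 5: three runnable tasks
      * fit in one 20ms period, whereas ten runnable tasks exceed nr_latency
      * and the period is stretched to 10 * 4ms = 40ms so that no slice drops
      * below the minimum granularity.
      */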
 420
 421/*
 422 * We calculate the wall-time slice from the period by taking a part
 423 * proportional to the weight.
 424 *
 425 * s = p*P[w/rw]
 426 */
 427static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 428{
 429        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 430
 431        for_each_sched_entity(se) {
 432                struct load_weight *load;
 433
 434                cfs_rq = cfs_rq_of(se);
 435                load = &cfs_rq->load;
 436
 437                if (unlikely(!se->on_rq)) {
 438                        struct load_weight lw = cfs_rq->load;
 439
 440                        update_load_add(&lw, se->load.weight);
 441                        load = &lw;
 442                }
 443                slice = calc_delta_mine(slice, se->load.weight, load);
 444        }
 445        return slice;
 446}
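
     /*
      * Continuing the example above: a 20ms period shared by two nice-0
      * tasks (weight 1024 each, rw = 2048) yields a 10ms slice for each.
      * If a weight-2048 task shares the CPU with a nice-0 task instead,
      * the slices become 20ms * 2048/3072 ~= 13.3ms and
      * 20ms * 1024/3072 ~= 6.7ms respectively.
      */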
 447
 448/*
  449 * We calculate the vruntime slice of a to-be-inserted task
 450 *
 451 * vs = s/w
 452 */
 453static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 454{
 455        return calc_delta_fair(sched_slice(cfs_rq, se), se);
 456}
 457
 458/*
 459 * Update the current task's runtime statistics. Skip current tasks that
 460 * are not in our scheduling class.
 461 */
 462static inline void
 463__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 464              unsigned long delta_exec)
 465{
 466        unsigned long delta_exec_weighted;
 467
 468        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));
 469
 470        curr->sum_exec_runtime += delta_exec;
 471        schedstat_add(cfs_rq, exec_clock, delta_exec);
 472        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 473        curr->vruntime += delta_exec_weighted;
 474        update_min_vruntime(cfs_rq);
 475}
 476
 477static void update_curr(struct cfs_rq *cfs_rq)
 478{
 479        struct sched_entity *curr = cfs_rq->curr;
 480        u64 now = rq_of(cfs_rq)->clock;
 481        unsigned long delta_exec;
 482
 483        if (unlikely(!curr))
 484                return;
 485
 486        /*
 487         * Get the amount of time the current task was running
 488         * since the last time we changed load (this cannot
 489         * overflow on 32 bits):
 490         */
 491        delta_exec = (unsigned long)(now - curr->exec_start);
 492        if (!delta_exec)
 493                return;
 494
 495        __update_curr(cfs_rq, curr, delta_exec);
 496        curr->exec_start = now;
 497
 498        if (entity_is_task(curr)) {
 499                struct task_struct *curtask = task_of(curr);
 500
 501                cpuacct_charge(curtask, delta_exec);
 502                account_group_exec_runtime(curtask, delta_exec);
 503        }
 504}
 505
 506static inline void
 507update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 508{
 509        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
 510}
 511
 512/*
 513 * Task is being enqueued - update stats:
 514 */
 515static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 516{
 517        /*
 518         * Are we enqueueing a waiting task? (for current tasks
 519         * a dequeue/enqueue event is a NOP)
 520         */
 521        if (se != cfs_rq->curr)
 522                update_stats_wait_start(cfs_rq, se);
 523}
 524
 525static void
 526update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 527{
 528        schedstat_set(se->wait_max, max(se->wait_max,
 529                        rq_of(cfs_rq)->clock - se->wait_start));
 530        schedstat_set(se->wait_count, se->wait_count + 1);
 531        schedstat_set(se->wait_sum, se->wait_sum +
 532                        rq_of(cfs_rq)->clock - se->wait_start);
 533        schedstat_set(se->wait_start, 0);
 534}
 535
 536static inline void
 537update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 538{
 539        /*
 540         * Mark the end of the wait period if dequeueing a
 541         * waiting task:
 542         */
 543        if (se != cfs_rq->curr)
 544                update_stats_wait_end(cfs_rq, se);
 545}
 546
 547/*
 548 * We are picking a new current task - update its stats:
 549 */
 550static inline void
 551update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 552{
 553        /*
 554         * We are starting a new run period:
 555         */
 556        se->exec_start = rq_of(cfs_rq)->clock;
 557}
 558
 559/**************************************************
 560 * Scheduling class queueing methods:
 561 */
 562
 563#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 564static void
 565add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 566{
 567        cfs_rq->task_weight += weight;
 568}
 569#else
 570static inline void
 571add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 572{
 573}
 574#endif
 575
 576static void
 577account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 578{
 579        update_load_add(&cfs_rq->load, se->load.weight);
 580        if (!parent_entity(se))
 581                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
 582        if (entity_is_task(se)) {
 583                add_cfs_task_weight(cfs_rq, se->load.weight);
 584                list_add(&se->group_node, &cfs_rq->tasks);
 585        }
 586        cfs_rq->nr_running++;
 587        se->on_rq = 1;
 588}
 589
 590static void
 591account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 592{
 593        update_load_sub(&cfs_rq->load, se->load.weight);
 594        if (!parent_entity(se))
 595                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
 596        if (entity_is_task(se)) {
 597                add_cfs_task_weight(cfs_rq, -se->load.weight);
 598                list_del_init(&se->group_node);
 599        }
 600        cfs_rq->nr_running--;
 601        se->on_rq = 0;
 602}
 603
 604static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 605{
 606#ifdef CONFIG_SCHEDSTATS
 607        if (se->sleep_start) {
 608                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
 609                struct task_struct *tsk = task_of(se);
 610
 611                if ((s64)delta < 0)
 612                        delta = 0;
 613
 614                if (unlikely(delta > se->sleep_max))
 615                        se->sleep_max = delta;
 616
 617                se->sleep_start = 0;
 618                se->sum_sleep_runtime += delta;
 619
 620                account_scheduler_latency(tsk, delta >> 10, 1);
 621        }
 622        if (se->block_start) {
 623                u64 delta = rq_of(cfs_rq)->clock - se->block_start;
 624                struct task_struct *tsk = task_of(se);
 625
 626                if ((s64)delta < 0)
 627                        delta = 0;
 628
 629                if (unlikely(delta > se->block_max))
 630                        se->block_max = delta;
 631
 632                se->block_start = 0;
 633                se->sum_sleep_runtime += delta;
 634
 635                /*
 636                 * Blocking time is in units of nanosecs, so shift by 20 to
 637                 * get a milliseconds-range estimation of the amount of
 638                 * time that the task spent sleeping:
 639                 */
 640                if (unlikely(prof_on == SLEEP_PROFILING)) {
 641
 642                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
 643                                     delta >> 20);
 644                }
 645                account_scheduler_latency(tsk, delta >> 10, 0);
 646        }
 647#endif
 648}
 649
 650static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 651{
 652#ifdef CONFIG_SCHED_DEBUG
 653        s64 d = se->vruntime - cfs_rq->min_vruntime;
 654
 655        if (d < 0)
 656                d = -d;
 657
 658        if (d > 3*sysctl_sched_latency)
 659                schedstat_inc(cfs_rq, nr_spread_over);
 660#endif
 661}
 662
 663static void
 664place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 665{
 666        u64 vruntime = cfs_rq->min_vruntime;
 667
 668        /*
 669         * The 'current' period is already promised to the current tasks,
 670         * however the extra weight of the new task will slow them down a
 671         * little, place the new task so that it fits in the slot that
 672         * stays open at the end.
 673         */
 674        if (initial && sched_feat(START_DEBIT))
 675                vruntime += sched_vslice(cfs_rq, se);
 676
 677        if (!initial) {
  678                /* sleeps up to a single latency don't count. */
 679                if (sched_feat(NEW_FAIR_SLEEPERS)) {
 680                        unsigned long thresh = sysctl_sched_latency;
 681
 682                        /*
 683                         * Convert the sleeper threshold into virtual time.
 684                         * SCHED_IDLE is a special sub-class.  We care about
 685                         * fairness only relative to other SCHED_IDLE tasks,
 686                         * all of which have the same weight.
 687                         */
 688                        if (sched_feat(NORMALIZED_SLEEPER) &&
 689                                        task_of(se)->policy != SCHED_IDLE)
 690                                thresh = calc_delta_fair(thresh, se);
 691
 692                        vruntime -= thresh;
 693                }
 694
 695                /* ensure we never gain time by being placed backwards. */
 696                vruntime = max_vruntime(se->vruntime, vruntime);
 697        }
 698
 699        se->vruntime = vruntime;
 700}
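
     /*
      * Rough example, with min_vruntime at 1000ms of virtual time: a freshly
      * forked nice-0 task with START_DEBIT is placed at 1000ms plus its
      * vslice (e.g. +10ms in the two-task case above), so it does not eat
      * into the period already promised to the running tasks. A task waking
      * from a long sleep is instead given up to one latency of credit (20ms
      * by default, converted to its virtual time for non-SCHED_IDLE tasks),
      * landing around 980ms - but max_vruntime() guarantees it is never
      * placed behind the vruntime it already had.
      */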
 701
 702static void
 703enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 704{
 705        /*
 706         * Update run-time statistics of the 'current'.
 707         */
 708        update_curr(cfs_rq);
 709        account_entity_enqueue(cfs_rq, se);
 710
 711        if (wakeup) {
 712                place_entity(cfs_rq, se, 0);
 713                enqueue_sleeper(cfs_rq, se);
 714        }
 715
 716        update_stats_enqueue(cfs_rq, se);
 717        check_spread(cfs_rq, se);
 718        if (se != cfs_rq->curr)
 719                __enqueue_entity(cfs_rq, se);
 720}
 721
 722static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 723{
 724        if (cfs_rq->last == se)
 725                cfs_rq->last = NULL;
 726
 727        if (cfs_rq->next == se)
 728                cfs_rq->next = NULL;
 729}
 730
 731static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 732{
 733        for_each_sched_entity(se)
 734                __clear_buddies(cfs_rq_of(se), se);
 735}
 736
 737static void
 738dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 739{
 740        /*
 741         * Update run-time statistics of the 'current'.
 742         */
 743        update_curr(cfs_rq);
 744
 745        update_stats_dequeue(cfs_rq, se);
 746        if (sleep) {
 747#ifdef CONFIG_SCHEDSTATS
 748                if (entity_is_task(se)) {
 749                        struct task_struct *tsk = task_of(se);
 750
 751                        if (tsk->state & TASK_INTERRUPTIBLE)
 752                                se->sleep_start = rq_of(cfs_rq)->clock;
 753                        if (tsk->state & TASK_UNINTERRUPTIBLE)
 754                                se->block_start = rq_of(cfs_rq)->clock;
 755                }
 756#endif
 757        }
 758
 759        clear_buddies(cfs_rq, se);
 760
 761        if (se != cfs_rq->curr)
 762                __dequeue_entity(cfs_rq, se);
 763        account_entity_dequeue(cfs_rq, se);
 764        update_min_vruntime(cfs_rq);
 765}
 766
 767/*
 768 * Preempt the current task with a newly woken task if needed:
 769 */
 770static void
 771check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 772{
 773        unsigned long ideal_runtime, delta_exec;
 774
 775        ideal_runtime = sched_slice(cfs_rq, curr);
 776        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 777        if (delta_exec > ideal_runtime) {
 778                resched_task(rq_of(cfs_rq)->curr);
 779                /*
 780                 * The current task ran long enough, ensure it doesn't get
 781                 * re-elected due to buddy favours.
 782                 */
 783                clear_buddies(cfs_rq, curr);
 784        }
 785}
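
     /*
      * Example: prev_sum_exec_runtime is reset every time an entity is
      * picked (see set_next_entity() below), so with a 10ms ideal slice the
      * running task is marked for rescheduling once it has accumulated 10ms
      * of CPU time since it was last chosen.
      */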
 786
 787static void
 788set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 789{
 790        /* 'current' is not kept within the tree. */
 791        if (se->on_rq) {
 792                /*
  793                 * Any task has to be enqueued before it gets to execute on
 794                 * a CPU. So account for the time it spent waiting on the
 795                 * runqueue.
 796                 */
 797                update_stats_wait_end(cfs_rq, se);
 798                __dequeue_entity(cfs_rq, se);
 799        }
 800
 801        update_stats_curr_start(cfs_rq, se);
 802        cfs_rq->curr = se;
 803#ifdef CONFIG_SCHEDSTATS
 804        /*
 805         * Track our maximum slice length, if the CPU's load is at
  806         * least twice that of our own weight (i.e. don't track it
 807         * when there are only lesser-weight tasks around):
 808         */
 809        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
 810                se->slice_max = max(se->slice_max,
 811                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
 812        }
 813#endif
 814        se->prev_sum_exec_runtime = se->sum_exec_runtime;
 815}
 816
 817static int
 818wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 819
 820static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 821{
 822        struct sched_entity *se = __pick_next_entity(cfs_rq);
 823
 824        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
 825                return cfs_rq->next;
 826
 827        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
 828                return cfs_rq->last;
 829
 830        return se;
 831}
 832
 833static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 834{
 835        /*
 836         * If still on the runqueue then deactivate_task()
 837         * was not called and update_curr() has to be done:
 838         */
 839        if (prev->on_rq)
 840                update_curr(cfs_rq);
 841
 842        check_spread(cfs_rq, prev);
 843        if (prev->on_rq) {
 844                update_stats_wait_start(cfs_rq, prev);
 845                /* Put 'current' back into the tree. */
 846                __enqueue_entity(cfs_rq, prev);
 847        }
 848        cfs_rq->curr = NULL;
 849}
 850
 851static void
 852entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 853{
 854        /*
 855         * Update run-time statistics of the 'current'.
 856         */
 857        update_curr(cfs_rq);
 858
 859#ifdef CONFIG_SCHED_HRTICK
 860        /*
 861         * queued ticks are scheduled to match the slice, so don't bother
 862         * validating it and just reschedule.
 863         */
 864        if (queued) {
 865                resched_task(rq_of(cfs_rq)->curr);
 866                return;
 867        }
 868        /*
 869         * don't let the period tick interfere with the hrtick preemption
 870         */
 871        if (!sched_feat(DOUBLE_TICK) &&
 872                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
 873                return;
 874#endif
 875
 876        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
 877                check_preempt_tick(cfs_rq, curr);
 878}
 879
 880/**************************************************
 881 * CFS operations on tasks:
 882 */
 883
 884#ifdef CONFIG_SCHED_HRTICK
 885static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 886{
 887        struct sched_entity *se = &p->se;
 888        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 889
 890        WARN_ON(task_rq(p) != rq);
 891
 892        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
 893                u64 slice = sched_slice(cfs_rq, se);
 894                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
 895                s64 delta = slice - ran;
 896
 897                if (delta < 0) {
 898                        if (rq->curr == p)
 899                                resched_task(p);
 900                        return;
 901                }
 902
 903                /*
  904                 * Don't schedule slices shorter than 10000ns; that just
 905                 * doesn't make sense. Rely on vruntime for fairness.
 906                 */
 907                if (rq->curr != p)
 908                        delta = max_t(s64, 10000LL, delta);
 909
 910                hrtick_start(rq, delta);
 911        }
 912}
 913
 914/*
 915 * called from enqueue/dequeue and updates the hrtick when the
 916 * current task is from our class and nr_running is low enough
 917 * to matter.
 918 */
 919static void hrtick_update(struct rq *rq)
 920{
 921        struct task_struct *curr = rq->curr;
 922
 923        if (curr->sched_class != &fair_sched_class)
 924                return;
 925
 926        if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
 927                hrtick_start_fair(rq, curr);
 928}
 929#else /* !CONFIG_SCHED_HRTICK */
 930static inline void
 931hrtick_start_fair(struct rq *rq, struct task_struct *p)
 932{
 933}
 934
 935static inline void hrtick_update(struct rq *rq)
 936{
 937}
 938#endif
 939
 940/*
 941 * The enqueue_task method is called before nr_running is
 942 * increased. Here we update the fair scheduling stats and
 943 * then put the task into the rbtree:
 944 */
 945static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
 946{
 947        struct cfs_rq *cfs_rq;
 948        struct sched_entity *se = &p->se;
 949
 950        for_each_sched_entity(se) {
 951                if (se->on_rq)
 952                        break;
 953                cfs_rq = cfs_rq_of(se);
 954                enqueue_entity(cfs_rq, se, wakeup);
 955                wakeup = 1;
 956        }
 957
 958        hrtick_update(rq);
 959}
 960
 961/*
 962 * The dequeue_task method is called before nr_running is
 963 * decreased. We remove the task from the rbtree and
 964 * update the fair scheduling stats:
 965 */
 966static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 967{
 968        struct cfs_rq *cfs_rq;
 969        struct sched_entity *se = &p->se;
 970
 971        for_each_sched_entity(se) {
 972                cfs_rq = cfs_rq_of(se);
 973                dequeue_entity(cfs_rq, se, sleep);
 974                /* Don't dequeue parent if it has other entities besides us */
 975                if (cfs_rq->load.weight)
 976                        break;
 977                sleep = 1;
 978        }
 979
 980        hrtick_update(rq);
 981}
 982
 983/*
 984 * sched_yield() support is very simple - we dequeue and enqueue.
 985 *
 986 * If compat_yield is turned on then we requeue to the end of the tree.
 987 */
 988static void yield_task_fair(struct rq *rq)
 989{
 990        struct task_struct *curr = rq->curr;
 991        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 992        struct sched_entity *rightmost, *se = &curr->se;
 993
 994        /*
 995         * Are we the only task in the tree?
 996         */
 997        if (unlikely(cfs_rq->nr_running == 1))
 998                return;
 999
1000        clear_buddies(cfs_rq, se);
1001
1002        if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
1003                update_rq_clock(rq);
1004                /*
1005                 * Update run-time statistics of the 'current'.
1006                 */
1007                update_curr(cfs_rq);
1008
1009                return;
1010        }
1011        /*
1012         * Find the rightmost entry in the rbtree:
1013         */
1014        rightmost = __pick_last_entity(cfs_rq);
1015        /*
1016         * Already in the rightmost position?
1017         */
1018        if (unlikely(!rightmost || rightmost->vruntime < se->vruntime))
1019                return;
1020
1021        /*
1022         * Minimally necessary key value to be last in the tree:
1023         * Upon rescheduling, sched_class::put_prev_task() will place
1024         * 'current' within the tree based on its new key value.
1025         */
1026        se->vruntime = rightmost->vruntime + 1;
1027}
1028
1029/*
1030 * wake_idle() will wake a task on an idle cpu if task->cpu is
1031 * not idle and an idle cpu is available.  The span of cpus to
1032 * search starts with cpus closest then further out as needed,
1033 * so we always favor a closer, idle cpu.
1034 * Domains may include CPUs that are not usable for migration,
1035 * hence we need to mask them out (cpu_active_mask)
1036 *
1037 * Returns the CPU we should wake onto.
1038 */
1039#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1040static int wake_idle(int cpu, struct task_struct *p)
1041{
1042        struct sched_domain *sd;
1043        int i;
1044        unsigned int chosen_wakeup_cpu;
1045        int this_cpu;
1046
1047        /*
1048         * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
1049         * are idle and this is not a kernel thread and this task's affinity
1050         * allows it to be moved to preferred cpu, then just move!
1051         */
1052
1053        this_cpu = smp_processor_id();
1054        chosen_wakeup_cpu =
1055                cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
1056
1057        if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
1058                idle_cpu(cpu) && idle_cpu(this_cpu) &&
1059                p->mm && !(p->flags & PF_KTHREAD) &&
1060                cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
1061                return chosen_wakeup_cpu;
1062
1063        /*
1064         * If it is idle, then it is the best cpu to run this task.
1065         *
1066         * This cpu is also the best, if it has more than one task already.
 1067 * Siblings must also be busy (in most cases) as they didn't already
 1068 * pick up the extra load from this cpu and hence we need not check
 1069 * sibling runqueue info. This will avoid the checks and cache miss
 1070 * penalties associated with that.
1071         */
1072        if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
1073                return cpu;
1074
1075        for_each_domain(cpu, sd) {
1076                if ((sd->flags & SD_WAKE_IDLE)
1077                    || ((sd->flags & SD_WAKE_IDLE_FAR)
1078                        && !task_hot(p, task_rq(p)->clock, sd))) {
1079                        for_each_cpu_and(i, sched_domain_span(sd),
1080                                         &p->cpus_allowed) {
1081                                if (cpu_active(i) && idle_cpu(i)) {
1082                                        if (i != task_cpu(p)) {
1083                                                schedstat_inc(p,
1084                                                       se.nr_wakeups_idle);
1085                                        }
1086                                        return i;
1087                                }
1088                        }
1089                } else {
1090                        break;
1091                }
1092        }
1093        return cpu;
1094}
1095#else /* !ARCH_HAS_SCHED_WAKE_IDLE*/
1096static inline int wake_idle(int cpu, struct task_struct *p)
1097{
1098        return cpu;
1099}
1100#endif
1101
1102#ifdef CONFIG_SMP
1103
1104#ifdef CONFIG_FAIR_GROUP_SCHED
1105/*
1106 * effective_load() calculates the load change as seen from the root_task_group
1107 *
1108 * Adding load to a group doesn't make a group heavier, but can cause movement
1109 * of group shares between cpus. Assuming the shares were perfectly aligned one
1110 * can calculate the shift in shares.
1111 *
1112 * The problem is that perfectly aligning the shares is rather expensive, hence
1113 * we try to avoid doing that too often - see update_shares(), which ratelimits
1114 * this change.
1115 *
1116 * We compensate this by not only taking the current delta into account, but
1117 * also considering the delta between when the shares were last adjusted and
1118 * now.
1119 *
 1120 * We still saw a performance dip; some tracing showed us that when
 1121 * balancing between cgroup:/ and cgroup:/foo the number of affine wakeups
 1122 * increased significantly. Therefore try to bias the error in the
 1123 * direction of failing the affine wakeup.
1124 *
1125 */
1126static long effective_load(struct task_group *tg, int cpu,
1127                long wl, long wg)
1128{
1129        struct sched_entity *se = tg->se[cpu];
1130
1131        if (!tg->parent)
1132                return wl;
1133
1134        /*
1135         * By not taking the decrease of shares on the other cpu into
1136         * account our error leans towards reducing the affine wakeups.
1137         */
1138        if (!wl && sched_feat(ASYM_EFF_LOAD))
1139                return wl;
1140
1141        for_each_sched_entity(se) {
1142                long S, rw, s, a, b;
1143                long more_w;
1144
1145                /*
1146                 * Instead of using this increment, also add the difference
1147                 * between when the shares were last updated and now.
1148                 */
1149                more_w = se->my_q->load.weight - se->my_q->rq_weight;
1150                wl += more_w;
1151                wg += more_w;
1152
1153                S = se->my_q->tg->shares;
1154                s = se->my_q->shares;
1155                rw = se->my_q->rq_weight;
1156
1157                a = S*(rw + wl);
1158                b = S*rw + s*wg;
1159
1160                wl = s*(a-b);
1161
1162                if (likely(b))
1163                        wl /= b;
1164
1165                /*
1166                 * Assume the group is already running and will
1167                 * thus already be accounted for in the weight.
1168                 *
 1169                 * That is, moving shares between CPUs does not
1170                 * alter the group weight.
1171                 */
1172                wg = 0;
1173        }
1174
1175        return wl;
1176}
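
     /*
      * The loop body above approximates the change of this group's weight
      * on this cpu. With shares distributed proportionally to runqueue
      * weight, s ~= S*rw/RW, so RW + wg ~= (S*rw + s*wg)/s = b/s and the
      * new share is s' = S*(rw + wl)/(RW + wg) ~= s*a/b. The delta that is
      * propagated to the parent level is therefore s' - s ~= s*(a - b)/b,
      * which is exactly the value 'wl' is set to above.
      */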
1177
1178#else
1179
1180static inline unsigned long effective_load(struct task_group *tg, int cpu,
1181                unsigned long wl, unsigned long wg)
1182{
1183        return wl;
1184}
1185
1186#endif
1187
1188static int
1189wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
1190            struct task_struct *p, int prev_cpu, int this_cpu, int sync,
1191            int idx, unsigned long load, unsigned long this_load,
1192            unsigned int imbalance)
1193{
1194        struct task_struct *curr = this_rq->curr;
1195        struct task_group *tg;
1196        unsigned long tl = this_load;
1197        unsigned long tl_per_task;
1198        unsigned long weight;
1199        int balanced;
1200
1201        if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
1202                return 0;
1203
1204        if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1205                        p->se.avg_overlap > sysctl_sched_migration_cost))
1206                sync = 0;
1207
1208        /*
1209         * If sync wakeup then subtract the (maximum possible)
1210         * effect of the currently running task from the load
1211         * of the current CPU:
1212         */
1213        if (sync) {
1214                tg = task_group(current);
1215                weight = current->se.load.weight;
1216
1217                tl += effective_load(tg, this_cpu, -weight, -weight);
1218                load += effective_load(tg, prev_cpu, 0, -weight);
1219        }
1220
1221        tg = task_group(p);
1222        weight = p->se.load.weight;
1223
1224        balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
1225                imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
1226
1227        /*
1228         * If the currently running task will sleep within
1229         * a reasonable amount of time then attract this newly
1230         * woken task:
1231         */
1232        if (sync && balanced)
1233                return 1;
1234
1235        schedstat_inc(p, se.nr_wakeups_affine_attempts);
1236        tl_per_task = cpu_avg_load_per_task(this_cpu);
1237
1238        if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
1239                        tl_per_task)) {
1240                /*
1241                 * This domain has SD_WAKE_AFFINE and
1242                 * p is cache cold in this domain, and
1243                 * there is no bad imbalance.
1244                 */
1245                schedstat_inc(this_sd, ttwu_move_affine);
1246                schedstat_inc(p, se.nr_wakeups_affine);
1247
1248                return 1;
1249        }
1250        return 0;
1251}
1252
1253static int select_task_rq_fair(struct task_struct *p, int sync)
1254{
1255        struct sched_domain *sd, *this_sd = NULL;
1256        int prev_cpu, this_cpu, new_cpu;
1257        unsigned long load, this_load;
1258        struct rq *this_rq;
1259        unsigned int imbalance;
1260        int idx;
1261
1262        prev_cpu        = task_cpu(p);
1263        this_cpu        = smp_processor_id();
1264        this_rq         = cpu_rq(this_cpu);
1265        new_cpu         = prev_cpu;
1266
1267        if (prev_cpu == this_cpu)
1268                goto out;
1269        /*
1270         * 'this_sd' is the first domain that both
1271         * this_cpu and prev_cpu are present in:
1272         */
1273        for_each_domain(this_cpu, sd) {
1274                if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
1275                        this_sd = sd;
1276                        break;
1277                }
1278        }
1279
1280        if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
1281                goto out;
1282
1283        /*
1284         * Check for affine wakeup and passive balancing possibilities.
1285         */
1286        if (!this_sd)
1287                goto out;
1288
1289        idx = this_sd->wake_idx;
1290
1291        imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
1292
1293        load = source_load(prev_cpu, idx);
1294        this_load = target_load(this_cpu, idx);
1295
1296        if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
1297                                     load, this_load, imbalance))
1298                return this_cpu;
1299
1300        /*
1301         * Start passive balancing when half the imbalance_pct
1302         * limit is reached.
1303         */
1304        if (this_sd->flags & SD_WAKE_BALANCE) {
1305                if (imbalance*this_load <= 100*load) {
1306                        schedstat_inc(this_sd, ttwu_move_balance);
1307                        schedstat_inc(p, se.nr_wakeups_passive);
1308                        return this_cpu;
1309                }
1310        }
1311
1312out:
1313        return wake_idle(new_cpu, p);
1314}
1315#endif /* CONFIG_SMP */
1316
1317/*
1318 * Adaptive granularity
1319 *
1320 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1321 * with the limit of wakeup_gran -- when it never does a wakeup.
1322 *
1323 * So the smaller avg_wakeup is the faster we want this task to preempt,
1324 * but we don't want to treat the preemptee unfairly and therefore allow it
1325 * to run for at least the amount of time we'd like to run.
1326 *
1327 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1328 *
1329 * NOTE: we use *nr_running to scale with load, this nicely matches the
1330 *       degrading latency on load.
1331 */
1332static unsigned long
1333adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1334{
1335        u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1336        u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1337        u64 gran = 0;
1338
1339        if (this_run < expected_wakeup)
1340                gran = expected_wakeup - this_run;
1341
1342        return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1343}
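
     /*
      * Effectively: gran = clamp(2*avg_wakeup*nr_running - this_run,
      *                           0, sysctl_sched_wakeup_granularity).
      * For example, with avg_wakeup = 1ms, nr_running = 3 and curr having
      * run 2ms of its current stint, the granularity shrinks to 4ms; had
      * curr already run 6ms or more it would drop to 0, letting the waking
      * task preempt as soon as its vruntime is ahead at all.
      */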
1344
1345static unsigned long
1346wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1347{
1348        unsigned long gran = sysctl_sched_wakeup_granularity;
1349
1350        if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1351                gran = adaptive_gran(curr, se);
1352
1353        /*
 1354         * Since it is curr that is running now, convert the gran from
 1355         * real-time to virtual-time in its units.
1356         */
1357        if (sched_feat(ASYM_GRAN)) {
1358                /*
1359                 * By using 'se' instead of 'curr' we penalize light tasks, so
 1360                 * they get preempted more easily. That is, if 'se' < 'curr' then
 1361                 * the resulting gran will be larger, therefore penalizing the
 1362                 * lighter; if, on the other hand, 'se' > 'curr' then the resulting
 1363                 * gran will be smaller, again penalizing the lighter task.
1364                 *
1365                 * This is especially important for buddies when the leftmost
1366                 * task is higher priority than the buddy.
1367                 */
1368                if (unlikely(se->load.weight != NICE_0_LOAD))
1369                        gran = calc_delta_fair(gran, se);
1370        } else {
1371                if (unlikely(curr->load.weight != NICE_0_LOAD))
1372                        gran = calc_delta_fair(gran, curr);
1373        }
1374
1375        return gran;
1376}
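
     /*
      * Example of the ASYM_GRAN scaling: starting from the 5ms default, a
      * waking entity of weight 2*NICE_0_LOAD sees gran shrink to 2.5ms and
      * preempts more easily, while one of half NICE_0_LOAD sees gran grow
      * to 10ms and must build up a correspondingly larger vruntime lead.
      */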
1377
1378/*
 1379 * Should 'se' preempt 'curr'?
1380 *
1381 *             |s1
1382 *        |s2
1383 *   |s3
1384 *         g
1385 *      |<--->|c
1386 *
1387 *  w(c, s1) = -1
1388 *  w(c, s2) =  0
1389 *  w(c, s3) =  1
1390 *
1391 */
1392static int
1393wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1394{
1395        s64 gran, vdiff = curr->vruntime - se->vruntime;
1396
1397        if (vdiff <= 0)
1398                return -1;
1399
1400        gran = wakeup_gran(curr, se);
1401        if (vdiff > gran)
1402                return 1;
1403
1404        return 0;
1405}
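
     /*
      * Mapping the picture above onto the code: vdiff = curr->vruntime -
      * se->vruntime is how much less virtual runtime 'se' has received than
      * 'curr'. vdiff <= 0 (case s1): 'se' is not owed anything, return -1.
      * 0 < vdiff <= gran (case s2): 'se' is owed less than one wakeup
      * granularity, return 0. vdiff > gran (case s3): 'se' is owed more than
      * the granularity, return 1 and the caller preempts. E.g. with the
      * 2.5ms gran from the example above, a 3ms deficit triggers preemption
      * while a 1ms deficit does not.
      */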
1406
1407static void set_last_buddy(struct sched_entity *se)
1408{
1409        if (likely(task_of(se)->policy != SCHED_IDLE)) {
1410                for_each_sched_entity(se)
1411                        cfs_rq_of(se)->last = se;
1412        }
1413}
1414
1415static void set_next_buddy(struct sched_entity *se)
1416{
1417        if (likely(task_of(se)->policy != SCHED_IDLE)) {
1418                for_each_sched_entity(se)
1419                        cfs_rq_of(se)->next = se;
1420        }
1421}
1422
1423/*
1424 * Preempt the current task with a newly woken task if needed:
1425 */
1426static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
1427{
1428        struct task_struct *curr = rq->curr;
1429        struct sched_entity *se = &curr->se, *pse = &p->se;
1430        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1431
1432        update_curr(cfs_rq);
1433
1434        if (unlikely(rt_prio(p->prio))) {
1435                resched_task(curr);
1436                return;
1437        }
1438
1439        if (unlikely(p->sched_class != &fair_sched_class))
1440                return;
1441
1442        if (unlikely(se == pse))
1443                return;
1444
1445        /*
1446         * Only set the backward buddy when the current task is still on the
1447         * rq. This can happen when a wakeup gets interleaved with schedule on
1448         * the ->pre_schedule() or idle_balance() point, either of which can
1449         * drop the rq lock.
1450         *
 1451         * Also, during early boot the idle thread is in the fair class; for
 1452         * obvious reasons it's a bad idea to schedule back to the idle thread.
1453         */
1454        if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
1455                set_last_buddy(se);
1456        set_next_buddy(pse);
1457
1458        /*
1459         * We can come here with TIF_NEED_RESCHED already set from new task
1460         * wake up path.
1461         */
1462        if (test_tsk_need_resched(curr))
1463                return;
1464
1465        /*
1466         * Batch and idle tasks do not preempt (their preemption is driven by
1467         * the tick):
1468         */
1469        if (unlikely(p->policy != SCHED_NORMAL))
1470                return;
1471
1472        /* Idle tasks are by definition preempted by everybody. */
1473        if (unlikely(curr->policy == SCHED_IDLE)) {
1474                resched_task(curr);
1475                return;
1476        }
1477
1478        if (!sched_feat(WAKEUP_PREEMPT))
1479                return;
1480
1481        if (sched_feat(WAKEUP_OVERLAP) && (sync ||
1482                        (se->avg_overlap < sysctl_sched_migration_cost &&
1483                         pse->avg_overlap < sysctl_sched_migration_cost))) {
1484                resched_task(curr);
1485                return;
1486        }
1487
1488        find_matching_se(&se, &pse);
1489
1490        while (se) {
1491                BUG_ON(!pse);
1492
1493                if (wakeup_preempt_entity(se, pse) == 1) {
1494                        resched_task(curr);
1495                        break;
1496                }
1497
1498                se = parent_entity(se);
1499                pse = parent_entity(pse);
1500        }
1501}
1502
1503static struct task_struct *pick_next_task_fair(struct rq *rq)
1504{
1505        struct task_struct *p;
1506        struct cfs_rq *cfs_rq = &rq->cfs;
1507        struct sched_entity *se;
1508
1509        if (unlikely(!cfs_rq->nr_running))
1510                return NULL;
1511
1512        do {
1513                se = pick_next_entity(cfs_rq);
1514                /*
1515                 * If se was a buddy, clear it so that it will have to earn
1516                 * the favour again.
1517                 */
1518                __clear_buddies(cfs_rq, se);
1519                set_next_entity(cfs_rq, se);
1520                cfs_rq = group_cfs_rq(se);
1521        } while (cfs_rq);
1522
1523        p = task_of(se);
1524        hrtick_start_fair(rq, p);
1525
1526        return p;
1527}
1528
1529/*
1530 * Account for a descheduled task:
1531 */
1532static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1533{
1534        struct sched_entity *se = &prev->se;
1535        struct cfs_rq *cfs_rq;
1536
1537        for_each_sched_entity(se) {
1538                cfs_rq = cfs_rq_of(se);
1539                put_prev_entity(cfs_rq, se);
1540        }
1541}
1542
1543#ifdef CONFIG_SMP
1544/**************************************************
1545 * Fair scheduling class load-balancing methods:
1546 */
1547
1548/*
1549 * Load-balancing iterator. Note: while the runqueue stays locked
1550 * during the whole iteration, the current task might be
1551 * dequeued so the iterator has to be dequeue-safe. Here we
1552 * achieve that by always pre-iterating before returning
1553 * the current task:
1554 */
1555static struct task_struct *
1556__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1557{
1558        struct task_struct *p = NULL;
1559        struct sched_entity *se;
1560
1561        if (next == &cfs_rq->tasks)
1562                return NULL;
1563
1564        se = list_entry(next, struct sched_entity, group_node);
1565        p = task_of(se);
1566        cfs_rq->balance_iterator = next->next;
1567
1568        return p;
1569}
1570
1571static struct task_struct *load_balance_start_fair(void *arg)
1572{
1573        struct cfs_rq *cfs_rq = arg;
1574
1575        return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
1576}
1577
1578static struct task_struct *load_balance_next_fair(void *arg)
1579{
1580        struct cfs_rq *cfs_rq = arg;
1581
1582        return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
1583}
1584
1585static unsigned long
1586__load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1587                unsigned long max_load_move, struct sched_domain *sd,
1588                enum cpu_idle_type idle, int *all_pinned, int *this_best_prio,
1589                struct cfs_rq *cfs_rq)
1590{
1591        struct rq_iterator cfs_rq_iterator;
1592
1593        cfs_rq_iterator.start = load_balance_start_fair;
1594        cfs_rq_iterator.next = load_balance_next_fair;
1595        cfs_rq_iterator.arg = cfs_rq;
1596
1597        return balance_tasks(this_rq, this_cpu, busiest,
1598                        max_load_move, sd, idle, all_pinned,
1599                        this_best_prio, &cfs_rq_iterator);
1600}
1601
1602#ifdef CONFIG_FAIR_GROUP_SCHED
1603static unsigned long
1604load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1605                  unsigned long max_load_move,
1606                  struct sched_domain *sd, enum cpu_idle_type idle,
1607                  int *all_pinned, int *this_best_prio)
1608{
1609        long rem_load_move = max_load_move;
1610        int busiest_cpu = cpu_of(busiest);
1611        struct task_group *tg;
1612
1613        rcu_read_lock();
1614        update_h_load(busiest_cpu);
1615
1616        list_for_each_entry_rcu(tg, &task_groups, list) {
1617                struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1618                unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1619                unsigned long busiest_weight = busiest_cfs_rq->load.weight;
1620                u64 rem_load, moved_load;
1621
1622                /*
1623                 * empty group
1624                 */
1625                if (!busiest_cfs_rq->task_weight)
1626                        continue;
1627
1628                rem_load = (u64)rem_load_move * busiest_weight;
1629                rem_load = div_u64(rem_load, busiest_h_load + 1);
1630
1631                moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
1632                                rem_load, sd, idle, all_pinned, this_best_prio,
1633                                tg->cfs_rq[busiest_cpu]);
1634
1635                if (!moved_load)
1636                        continue;
1637
1638                moved_load *= busiest_h_load;
1639                moved_load = div_u64(moved_load, busiest_weight + 1);
1640
1641                rem_load_move -= moved_load;
1642                if (rem_load_move < 0)
1643                        break;
1644        }
1645        rcu_read_unlock();
1646
1647        return max_load_move - rem_load_move;
1648}
1649#else
1650static unsigned long
1651load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1652                  unsigned long max_load_move,
1653                  struct sched_domain *sd, enum cpu_idle_type idle,
1654                  int *all_pinned, int *this_best_prio)
1655{
1656        return __load_balance_fair(this_rq, this_cpu, busiest,
1657                        max_load_move, sd, idle, all_pinned,
1658                        this_best_prio, &busiest->cfs);
1659}
1660#endif
1661
1662static int
1663move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1664                   struct sched_domain *sd, enum cpu_idle_type idle)
1665{
1666        struct cfs_rq *busy_cfs_rq;
1667        struct rq_iterator cfs_rq_iterator;
1668
1669        cfs_rq_iterator.start = load_balance_start_fair;
1670        cfs_rq_iterator.next = load_balance_next_fair;
1671
1672        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
1673                /*
1674                 * pass busy_cfs_rq argument into
1675                 * load_balance_[start|next]_fair iterators
1676                 */
1677                cfs_rq_iterator.arg = busy_cfs_rq;
1678                if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
1679                                       &cfs_rq_iterator))
1680                    return 1;
1681        }
1682
1683        return 0;
1684}
1685#endif /* CONFIG_SMP */
1686
1687/*
1688 * scheduler tick hitting a task of our scheduling class:
1689 */
1690static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
1691{
1692        struct cfs_rq *cfs_rq;
1693        struct sched_entity *se = &curr->se;
1694
1695        for_each_sched_entity(se) {
1696                cfs_rq = cfs_rq_of(se);
1697                entity_tick(cfs_rq, se, queued);
1698        }
1699}
1700
1701/*
 1702 * Share the fairness runtime between parent and child, so that the
 1703 * total amount of pressure on the CPU stays equal - new tasks
1704 * get a chance to run but frequent forkers are not allowed to
1705 * monopolize the CPU. Note: the parent runqueue is locked,
1706 * the child is not running yet.
1707 */
1708static void task_new_fair(struct rq *rq, struct task_struct *p)
1709{
1710        struct cfs_rq *cfs_rq = task_cfs_rq(p);
1711        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
1712        int this_cpu = smp_processor_id();
1713
1714        sched_info_queued(p);
1715
1716        update_curr(cfs_rq);
1717        place_entity(cfs_rq, se, 1);
1718
1719        /* 'curr' will be NULL if the child belongs to a different group */
1720        if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
1721                        curr && curr->vruntime < se->vruntime) {
1722                /*
1723                 * Upon rescheduling, sched_class::put_prev_task() will place
1724                 * 'current' within the tree based on its new key value.
1725                 */
1726                swap(curr->vruntime, se->vruntime);
1727                resched_task(rq->curr);
1728        }
1729
1730        enqueue_task_fair(rq, p, 0);
1731}
1732
1733/*
1734 * Priority of the task has changed. Check to see if we preempt
1735 * the current task.
1736 */
1737static void prio_changed_fair(struct rq *rq, struct task_struct *p,
1738                              int oldprio, int running)
1739{
1740        /*
1741         * Reschedule if we are currently running on this runqueue and
1742         * our priority decreased, or if we are not currently running on
 1743         * this runqueue and our priority is higher than the current's.
1744         */
1745        if (running) {
1746                if (p->prio > oldprio)
1747                        resched_task(rq->curr);
1748        } else
1749                check_preempt_curr(rq, p, 0);
1750}
1751
1752/*
1753 * We switched to the sched_fair class.
1754 */
1755static void switched_to_fair(struct rq *rq, struct task_struct *p,
1756                             int running)
1757{
1758        /*
1759         * We were most likely switched from sched_rt, so
1760         * kick off the schedule if running, otherwise just see
1761         * if we can still preempt the current task.
1762         */
1763        if (running)
1764                resched_task(rq->curr);
1765        else
1766                check_preempt_curr(rq, p, 0);
1767}
1768
1769/* Account for a task changing its policy or group.
1770 *
1771 * This routine is mostly called to set cfs_rq->curr field when a task
1772 * migrates between groups/classes.
1773 */
1774static void set_curr_task_fair(struct rq *rq)
1775{
1776        struct sched_entity *se = &rq->curr->se;
1777
1778        for_each_sched_entity(se)
1779                set_next_entity(cfs_rq_of(se), se);
1780}
1781
1782#ifdef CONFIG_FAIR_GROUP_SCHED
1783static void moved_group_fair(struct task_struct *p)
1784{
1785        struct cfs_rq *cfs_rq = task_cfs_rq(p);
1786
1787        update_curr(cfs_rq);
1788        place_entity(cfs_rq, &p->se, 1);
1789}
1790#endif
1791
1792/*
1793 * All the scheduling class methods:
1794 */
1795static const struct sched_class fair_sched_class = {
1796        .next                   = &idle_sched_class,
1797        .enqueue_task           = enqueue_task_fair,
1798        .dequeue_task           = dequeue_task_fair,
1799        .yield_task             = yield_task_fair,
1800
1801        .check_preempt_curr     = check_preempt_wakeup,
1802
1803        .pick_next_task         = pick_next_task_fair,
1804        .put_prev_task          = put_prev_task_fair,
1805
1806#ifdef CONFIG_SMP
1807        .select_task_rq         = select_task_rq_fair,
1808
1809        .load_balance           = load_balance_fair,
1810        .move_one_task          = move_one_task_fair,
1811#endif
1812
1813        .set_curr_task          = set_curr_task_fair,
1814        .task_tick              = task_tick_fair,
1815        .task_new               = task_new_fair,
1816
1817        .prio_changed           = prio_changed_fair,
1818        .switched_to            = switched_to_fair,
1819
1820#ifdef CONFIG_FAIR_GROUP_SCHED
1821        .moved_group            = moved_group_fair,
1822#endif
1823};
1824
1825#ifdef CONFIG_SCHED_DEBUG
1826static void print_cfs_stats(struct seq_file *m, int cpu)
1827{
1828        struct cfs_rq *cfs_rq;
1829
1830        rcu_read_lock();
1831        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
1832                print_cfs_rq(m, cpu, cfs_rq);
1833        rcu_read_unlock();
1834}
1835#endif
1836