linux/kernel/sched_fair.c
   1/*
   2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
   3 *
   4 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   5 *
   6 *  Interactivity improvements by Mike Galbraith
   7 *  (C) 2007 Mike Galbraith <efault@gmx.de>
   8 *
   9 *  Various enhancements by Dmitry Adamushko.
  10 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
  11 *
  12 *  Group scheduling enhancements by Srivatsa Vaddagiri
  13 *  Copyright IBM Corporation, 2007
  14 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
  15 *
  16 *  Scaled math optimizations by Thomas Gleixner
  17 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  18 *
  19 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  20 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  21 */
  22
  23#include <linux/latencytop.h>
  24#include <linux/sched.h>
  25#include <linux/cpumask.h>
  26
  27/*
  28 * Targeted preemption latency for CPU-bound tasks:
  29 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
  30 *
  31 * NOTE: this latency value is not the same as the concept of
   32 * 'timeslice length' - timeslices in CFS are of variable length
   33 * and there is no persistent notion of a timeslice as in traditional,
   34 * time-slice based scheduling concepts.
  35 *
  36 * (to see the precise effective timeslice length of your workload,
  37 *  run vmstat and monitor the context-switches (cs) field)
  38 */
  39unsigned int sysctl_sched_latency = 6000000ULL;
  40unsigned int normalized_sysctl_sched_latency = 6000000ULL;
  41
  42/*
  43 * The initial- and re-scaling of tunables is configurable
   44 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
  45 *
  46 * Options are:
  47 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
   48 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
   49 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
  50 */
  51enum sched_tunable_scaling sysctl_sched_tunable_scaling
  52        = SCHED_TUNABLESCALING_LOG;
  53
  54/*
  55 * Minimal preemption granularity for CPU-bound tasks:
  56 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
  57 */
  58unsigned int sysctl_sched_min_granularity = 750000ULL;
  59unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
  60
  61/*
   62 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  63 */
  64static unsigned int sched_nr_latency = 8;
  65
  66/*
  67 * After fork, child runs first. If set to 0 (default) then
  68 * parent will (try to) run first.
  69 */
  70unsigned int sysctl_sched_child_runs_first __read_mostly;
  71
  72/*
  73 * SCHED_OTHER wake-up granularity.
  74 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  75 *
  76 * This option delays the preemption effects of decoupled workloads
  77 * and reduces their over-scheduling. Synchronous workloads will still
  78 * have immediate wakeup/sleep latencies.
  79 */
  80unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
  81unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
  82
  83const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  84
  85/*
   86 * The exponential sliding window over which load is averaged for shares
  87 * distribution.
  88 * (default: 10msec)
  89 */
  90unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
  91
  92static const struct sched_class fair_sched_class;
  93
  94/**************************************************************
  95 * CFS operations on generic schedulable entities:
  96 */
  97
  98#ifdef CONFIG_FAIR_GROUP_SCHED
  99
 100/* cpu runqueue to which this cfs_rq is attached */
 101static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 102{
 103        return cfs_rq->rq;
 104}
 105
 106/* An entity is a task if it doesn't "own" a runqueue */
 107#define entity_is_task(se)      (!se->my_q)
 108
 109static inline struct task_struct *task_of(struct sched_entity *se)
 110{
 111#ifdef CONFIG_SCHED_DEBUG
 112        WARN_ON_ONCE(!entity_is_task(se));
 113#endif
 114        return container_of(se, struct task_struct, se);
 115}
 116
 117/* Walk up scheduling entities hierarchy */
 118#define for_each_sched_entity(se) \
 119                for (; se; se = se->parent)
 120
 121static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 122{
 123        return p->se.cfs_rq;
 124}
 125
 126/* runqueue on which this entity is (to be) queued */
 127static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 128{
 129        return se->cfs_rq;
 130}
 131
 132/* runqueue "owned" by this group */
 133static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 134{
 135        return grp->my_q;
 136}
 137
 138static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 139{
 140        if (!cfs_rq->on_list) {
 141                /*
 142                 * Ensure we either appear before our parent (if already
 143                 * enqueued) or force our parent to appear after us when it is
 144                 * enqueued.  The fact that we always enqueue bottom-up
 145                 * reduces this to two cases.
 146                 */
 147                if (cfs_rq->tg->parent &&
 148                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
 149                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
 150                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
 151                } else {
 152                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
 153                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
 154                }
 155
 156                cfs_rq->on_list = 1;
 157        }
 158}
 159
 160static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 161{
 162        if (cfs_rq->on_list) {
 163                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
 164                cfs_rq->on_list = 0;
 165        }
 166}
 167
  168/* Iterate through all leaf cfs_rq's on a runqueue */
 169#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 170        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 171
 172/* Do the two (enqueued) entities belong to the same group ? */
 173static inline int
 174is_same_group(struct sched_entity *se, struct sched_entity *pse)
 175{
 176        if (se->cfs_rq == pse->cfs_rq)
 177                return 1;
 178
 179        return 0;
 180}
 181
 182static inline struct sched_entity *parent_entity(struct sched_entity *se)
 183{
 184        return se->parent;
 185}
 186
 187/* return depth at which a sched entity is present in the hierarchy */
 188static inline int depth_se(struct sched_entity *se)
 189{
 190        int depth = 0;
 191
 192        for_each_sched_entity(se)
 193                depth++;
 194
 195        return depth;
 196}
 197
 198static void
 199find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 200{
 201        int se_depth, pse_depth;
 202
 203        /*
  204         * A preemption test can only be made between sibling entities that
  205         * are in the same cfs_rq, i.e. that have a common parent. Walk up the
  206         * hierarchy of both tasks until we find ancestors that are siblings
  207         * under a common parent.
 208         */
 209
 210        /* First walk up until both entities are at same depth */
 211        se_depth = depth_se(*se);
 212        pse_depth = depth_se(*pse);
 213
 214        while (se_depth > pse_depth) {
 215                se_depth--;
 216                *se = parent_entity(*se);
 217        }
 218
 219        while (pse_depth > se_depth) {
 220                pse_depth--;
 221                *pse = parent_entity(*pse);
 222        }
 223
 224        while (!is_same_group(*se, *pse)) {
 225                *se = parent_entity(*se);
 226                *pse = parent_entity(*pse);
 227        }
 228}
 229
 230#else   /* !CONFIG_FAIR_GROUP_SCHED */
 231
 232static inline struct task_struct *task_of(struct sched_entity *se)
 233{
 234        return container_of(se, struct task_struct, se);
 235}
 236
 237static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
 238{
 239        return container_of(cfs_rq, struct rq, cfs);
 240}
 241
 242#define entity_is_task(se)      1
 243
 244#define for_each_sched_entity(se) \
 245                for (; se; se = NULL)
 246
 247static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
 248{
 249        return &task_rq(p)->cfs;
 250}
 251
 252static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
 253{
 254        struct task_struct *p = task_of(se);
 255        struct rq *rq = task_rq(p);
 256
 257        return &rq->cfs;
 258}
 259
 260/* runqueue "owned" by this group */
 261static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 262{
 263        return NULL;
 264}
 265
 266static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 267{
 268}
 269
 270static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 271{
 272}
 273
 274#define for_each_leaf_cfs_rq(rq, cfs_rq) \
 275                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 276
 277static inline int
 278is_same_group(struct sched_entity *se, struct sched_entity *pse)
 279{
 280        return 1;
 281}
 282
 283static inline struct sched_entity *parent_entity(struct sched_entity *se)
 284{
 285        return NULL;
 286}
 287
 288static inline void
 289find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 290{
 291}
 292
 293#endif  /* CONFIG_FAIR_GROUP_SCHED */
 294
 295
 296/**************************************************************
 297 * Scheduling class tree data structure manipulation methods:
 298 */
 299
 300static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
 301{
 302        s64 delta = (s64)(vruntime - min_vruntime);
 303        if (delta > 0)
 304                min_vruntime = vruntime;
 305
 306        return min_vruntime;
 307}
 308
 309static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
 310{
 311        s64 delta = (s64)(vruntime - min_vruntime);
 312        if (delta < 0)
 313                min_vruntime = vruntime;
 314
 315        return min_vruntime;
 316}
 317
 318static inline int entity_before(struct sched_entity *a,
 319                                struct sched_entity *b)
 320{
 321        return (s64)(a->vruntime - b->vruntime) < 0;
 322}
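
/*
 * Editor's note (illustrative, not part of the original file): the helpers
 * above compare u64 vruntimes through a signed difference, so the ordering
 * survives counter wraparound. A minimal userspace sketch of the same trick:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static int before(uint64_t a, uint64_t b)
 *	{
 *		return (int64_t)(a - b) < 0;	// same test as entity_before()
 *	}
 *
 *	int main(void)
 *	{
 *		uint64_t a = UINT64_MAX - 100;	// just below the wrap point
 *		uint64_t b = 50;		// already wrapped past zero
 *
 *		// a - b == 2^64 - 151, i.e. -151 when read as a signed value,
 *		// so 'a' is still ordered before 'b': this prints "1 0".
 *		printf("%d %d\n", before(a, b), before(b, a));
 *		return 0;
 *	}
 */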
 323
 324static void update_min_vruntime(struct cfs_rq *cfs_rq)
 325{
 326        u64 vruntime = cfs_rq->min_vruntime;
 327
 328        if (cfs_rq->curr)
 329                vruntime = cfs_rq->curr->vruntime;
 330
 331        if (cfs_rq->rb_leftmost) {
 332                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
 333                                                   struct sched_entity,
 334                                                   run_node);
 335
 336                if (!cfs_rq->curr)
 337                        vruntime = se->vruntime;
 338                else
 339                        vruntime = min_vruntime(vruntime, se->vruntime);
 340        }
 341
 342        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
 343#ifndef CONFIG_64BIT
 344        smp_wmb();
 345        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 346#endif
 347}
 348
 349/*
 350 * Enqueue an entity into the rb-tree:
 351 */
 352static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 353{
 354        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
 355        struct rb_node *parent = NULL;
 356        struct sched_entity *entry;
 357        int leftmost = 1;
 358
 359        /*
 360         * Find the right place in the rbtree:
 361         */
 362        while (*link) {
 363                parent = *link;
 364                entry = rb_entry(parent, struct sched_entity, run_node);
 365                /*
  366                 * We don't care about collisions. Nodes with
 367                 * the same key stay together.
 368                 */
 369                if (entity_before(se, entry)) {
 370                        link = &parent->rb_left;
 371                } else {
 372                        link = &parent->rb_right;
 373                        leftmost = 0;
 374                }
 375        }
 376
 377        /*
 378         * Maintain a cache of leftmost tree entries (it is frequently
 379         * used):
 380         */
 381        if (leftmost)
 382                cfs_rq->rb_leftmost = &se->run_node;
 383
 384        rb_link_node(&se->run_node, parent, link);
 385        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 386}
 387
 388static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 389{
 390        if (cfs_rq->rb_leftmost == &se->run_node) {
 391                struct rb_node *next_node;
 392
 393                next_node = rb_next(&se->run_node);
 394                cfs_rq->rb_leftmost = next_node;
 395        }
 396
 397        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
 398}
 399
 400static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
 401{
 402        struct rb_node *left = cfs_rq->rb_leftmost;
 403
 404        if (!left)
 405                return NULL;
 406
 407        return rb_entry(left, struct sched_entity, run_node);
 408}
 409
 410static struct sched_entity *__pick_next_entity(struct sched_entity *se)
 411{
 412        struct rb_node *next = rb_next(&se->run_node);
 413
 414        if (!next)
 415                return NULL;
 416
 417        return rb_entry(next, struct sched_entity, run_node);
 418}
 419
 420#ifdef CONFIG_SCHED_DEBUG
 421static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 422{
 423        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
 424
 425        if (!last)
 426                return NULL;
 427
 428        return rb_entry(last, struct sched_entity, run_node);
 429}
 430
 431/**************************************************************
 432 * Scheduling class statistics methods:
 433 */
 434
 435int sched_proc_update_handler(struct ctl_table *table, int write,
 436                void __user *buffer, size_t *lenp,
 437                loff_t *ppos)
 438{
 439        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 440        int factor = get_update_sysctl_factor();
 441
 442        if (ret || !write)
 443                return ret;
 444
 445        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
 446                                        sysctl_sched_min_granularity);
 447
 448#define WRT_SYSCTL(name) \
 449        (normalized_sysctl_##name = sysctl_##name / (factor))
 450        WRT_SYSCTL(sched_min_granularity);
 451        WRT_SYSCTL(sched_latency);
 452        WRT_SYSCTL(sched_wakeup_granularity);
 453#undef WRT_SYSCTL
 454
 455        return 0;
 456}
 457#endif
 458
 459/*
 460 * delta /= w
 461 */
 462static inline unsigned long
 463calc_delta_fair(unsigned long delta, struct sched_entity *se)
 464{
 465        if (unlikely(se->load.weight != NICE_0_LOAD))
 466                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
 467
 468        return delta;
 469}
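
/*
 * Editor's worked example (not part of the original file): with NICE_0_LOAD
 * at its usual value of 1024, a task of weight 2048 has a 10ms delta scaled
 * down to 10ms * 1024/2048 = 5ms, i.e. its vruntime advances at half wall
 * speed, while a nice-0 task (weight 1024) advances at wall speed.
 */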
 470
 471/*
 472 * The idea is to set a period in which each task runs once.
 473 *
  474 * When there are too many tasks (more than sched_nr_latency) we have to stretch
 475 * this period because otherwise the slices get too small.
 476 *
 477 * p = (nr <= nl) ? l : l*nr/nl
 478 */
 479static u64 __sched_period(unsigned long nr_running)
 480{
 481        u64 period = sysctl_sched_latency;
 482        unsigned long nr_latency = sched_nr_latency;
 483
 484        if (unlikely(nr_running > nr_latency)) {
 485                period = sysctl_sched_min_granularity;
 486                period *= nr_running;
 487        }
 488
 489        return period;
 490}
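
/*
 * Editor's worked example (not part of the original file), using the
 * unscaled defaults above: with sysctl_sched_latency = 6ms,
 * sysctl_sched_min_granularity = 0.75ms and sched_nr_latency = 8, a
 * runqueue with 5 tasks keeps the 6ms period, while one with 12 tasks
 * stretches it to 12 * 0.75ms = 9ms, so that no slice shrinks below the
 * minimum granularity.
 */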
 491
 492/*
 493 * We calculate the wall-time slice from the period by taking a part
 494 * proportional to the weight.
 495 *
 496 * s = p*P[w/rw]
 497 */
 498static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 499{
 500        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 501
 502        for_each_sched_entity(se) {
 503                struct load_weight *load;
 504                struct load_weight lw;
 505
 506                cfs_rq = cfs_rq_of(se);
 507                load = &cfs_rq->load;
 508
 509                if (unlikely(!se->on_rq)) {
 510                        lw = cfs_rq->load;
 511
 512                        update_load_add(&lw, se->load.weight);
 513                        load = &lw;
 514                }
 515                slice = calc_delta_mine(slice, se->load.weight, load);
 516        }
 517        return slice;
 518}
 519
 520/*
  521 * We calculate the vruntime slice of a to-be-inserted task.
 522 *
 523 * vs = s/w
 524 */
 525static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 526{
 527        return calc_delta_fair(sched_slice(cfs_rq, se), se);
 528}
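
/*
 * Editor's worked example (not part of the original file), assuming a
 * single-level runqueue (no group hierarchy), NICE_0_LOAD = 1024 and two
 * hypothetical tasks of weight 1024 and 2048 sharing a 6ms period:
 *
 *	rq weight = 1024 + 2048 = 3072
 *	slice(w=1024)  = 6ms * 1024/3072 = 2ms
 *	slice(w=2048)  = 6ms * 2048/3072 = 4ms
 *	vslice(w=1024) = 2ms * 1024/1024 = 2ms
 *	vslice(w=2048) = 4ms * 1024/2048 = 2ms
 *
 * Wall-clock slices are proportional to weight, while the corresponding
 * vruntime slices come out equal, so over one full period every runnable
 * task's vruntime advances by the same amount.
 */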
 529
 530static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
 531static void update_cfs_shares(struct cfs_rq *cfs_rq);
 532
 533/*
 534 * Update the current task's runtime statistics. Skip current tasks that
 535 * are not in our scheduling class.
 536 */
 537static inline void
 538__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 539              unsigned long delta_exec)
 540{
 541        unsigned long delta_exec_weighted;
 542
 543        schedstat_set(curr->statistics.exec_max,
 544                      max((u64)delta_exec, curr->statistics.exec_max));
 545
 546        curr->sum_exec_runtime += delta_exec;
 547        schedstat_add(cfs_rq, exec_clock, delta_exec);
 548        delta_exec_weighted = calc_delta_fair(delta_exec, curr);
 549
 550        curr->vruntime += delta_exec_weighted;
 551        update_min_vruntime(cfs_rq);
 552
 553#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 554        cfs_rq->load_unacc_exec_time += delta_exec;
 555#endif
 556}
 557
 558static void update_curr(struct cfs_rq *cfs_rq)
 559{
 560        struct sched_entity *curr = cfs_rq->curr;
 561        u64 now = rq_of(cfs_rq)->clock_task;
 562        unsigned long delta_exec;
 563
 564        if (unlikely(!curr))
 565                return;
 566
 567        /*
 568         * Get the amount of time the current task was running
 569         * since the last time we changed load (this cannot
 570         * overflow on 32 bits):
 571         */
 572        delta_exec = (unsigned long)(now - curr->exec_start);
 573        if (!delta_exec)
 574                return;
 575
 576        __update_curr(cfs_rq, curr, delta_exec);
 577        curr->exec_start = now;
 578
 579        if (entity_is_task(curr)) {
 580                struct task_struct *curtask = task_of(curr);
 581
 582                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
 583                cpuacct_charge(curtask, delta_exec);
 584                account_group_exec_runtime(curtask, delta_exec);
 585        }
 586}
 587
 588static inline void
 589update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 590{
 591        schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
 592}
 593
 594/*
 595 * Task is being enqueued - update stats:
 596 */
 597static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 598{
 599        /*
 600         * Are we enqueueing a waiting task? (for current tasks
 601         * a dequeue/enqueue event is a NOP)
 602         */
 603        if (se != cfs_rq->curr)
 604                update_stats_wait_start(cfs_rq, se);
 605}
 606
 607static void
 608update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 609{
 610        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
 611                        rq_of(cfs_rq)->clock - se->statistics.wait_start));
 612        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
 613        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
 614                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
 615#ifdef CONFIG_SCHEDSTATS
 616        if (entity_is_task(se)) {
 617                trace_sched_stat_wait(task_of(se),
 618                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
 619        }
 620#endif
 621        schedstat_set(se->statistics.wait_start, 0);
 622}
 623
 624static inline void
 625update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 626{
 627        /*
 628         * Mark the end of the wait period if dequeueing a
 629         * waiting task:
 630         */
 631        if (se != cfs_rq->curr)
 632                update_stats_wait_end(cfs_rq, se);
 633}
 634
 635/*
 636 * We are picking a new current task - update its stats:
 637 */
 638static inline void
 639update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 640{
 641        /*
 642         * We are starting a new run period:
 643         */
 644        se->exec_start = rq_of(cfs_rq)->clock_task;
 645}
 646
 647/**************************************************
 648 * Scheduling class queueing methods:
 649 */
 650
 651#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 652static void
 653add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 654{
 655        cfs_rq->task_weight += weight;
 656}
 657#else
 658static inline void
 659add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
 660{
 661}
 662#endif
 663
 664static void
 665account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 666{
 667        update_load_add(&cfs_rq->load, se->load.weight);
 668        if (!parent_entity(se))
 669                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
 670        if (entity_is_task(se)) {
 671                add_cfs_task_weight(cfs_rq, se->load.weight);
 672                list_add(&se->group_node, &cfs_rq->tasks);
 673        }
 674        cfs_rq->nr_running++;
 675}
 676
 677static void
 678account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 679{
 680        update_load_sub(&cfs_rq->load, se->load.weight);
 681        if (!parent_entity(se))
 682                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
 683        if (entity_is_task(se)) {
 684                add_cfs_task_weight(cfs_rq, -se->load.weight);
 685                list_del_init(&se->group_node);
 686        }
 687        cfs_rq->nr_running--;
 688}
 689
 690#ifdef CONFIG_FAIR_GROUP_SCHED
 691# ifdef CONFIG_SMP
 692static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 693                                            int global_update)
 694{
 695        struct task_group *tg = cfs_rq->tg;
 696        long load_avg;
 697
 698        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
 699        load_avg -= cfs_rq->load_contribution;
 700
 701        if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
 702                atomic_add(load_avg, &tg->load_weight);
 703                cfs_rq->load_contribution += load_avg;
 704        }
 705}
 706
 707static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 708{
 709        u64 period = sysctl_sched_shares_window;
 710        u64 now, delta;
 711        unsigned long load = cfs_rq->load.weight;
 712
 713        if (cfs_rq->tg == &root_task_group)
 714                return;
 715
 716        now = rq_of(cfs_rq)->clock_task;
 717        delta = now - cfs_rq->load_stamp;
 718
 719        /* truncate load history at 4 idle periods */
 720        if (cfs_rq->load_stamp > cfs_rq->load_last &&
 721            now - cfs_rq->load_last > 4 * period) {
 722                cfs_rq->load_period = 0;
 723                cfs_rq->load_avg = 0;
 724                delta = period - 1;
 725        }
 726
 727        cfs_rq->load_stamp = now;
 728        cfs_rq->load_unacc_exec_time = 0;
 729        cfs_rq->load_period += delta;
 730        if (load) {
 731                cfs_rq->load_last = now;
 732                cfs_rq->load_avg += delta * load;
 733        }
 734
 735        /* consider updating load contribution on each fold or truncate */
 736        if (global_update || cfs_rq->load_period > period
 737            || !cfs_rq->load_period)
 738                update_cfs_rq_load_contribution(cfs_rq, global_update);
 739
 740        while (cfs_rq->load_period > period) {
 741                /*
 742                 * Inline assembly required to prevent the compiler
 743                 * optimising this loop into a divmod call.
 744                 * See __iter_div_u64_rem() for another example of this.
 745                 */
 746                asm("" : "+rm" (cfs_rq->load_period));
 747                cfs_rq->load_period /= 2;
 748                cfs_rq->load_avg /= 2;
 749        }
 750
 751        if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
 752                list_del_leaf_cfs_rq(cfs_rq);
 753}
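
/*
 * Editor's note (illustrative, not part of the original file): load_avg
 * accumulates delta * load over the running window, and every time
 * load_period exceeds the window both are halved, so each completed window
 * contributes with roughly half the weight of the one after it. The ratio
 * consumed by update_cfs_rq_load_contribution(), load_avg / (load_period + 1),
 * therefore tracks recent load with an exponentially decaying memory of the
 * last few 10ms windows.
 */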
 754
 755static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 756{
 757        long load_weight, load, shares;
 758
 759        load = cfs_rq->load.weight;
 760
 761        load_weight = atomic_read(&tg->load_weight);
 762        load_weight += load;
 763        load_weight -= cfs_rq->load_contribution;
 764
 765        shares = (tg->shares * load);
 766        if (load_weight)
 767                shares /= load_weight;
 768
 769        if (shares < MIN_SHARES)
 770                shares = MIN_SHARES;
 771        if (shares > tg->shares)
 772                shares = tg->shares;
 773
 774        return shares;
 775}
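
/*
 * Editor's worked example (not part of the original file), with hypothetical
 * numbers: a group with tg->shares = 1024 whose global load_weight is 4096,
 * of which this cpu previously contributed 2048, and whose local
 * cfs_rq->load.weight is still 2048. The denominator stays at
 * 4096 + 2048 - 2048 = 4096, so shares = 1024 * 2048 / 4096 = 512: the
 * group entity on this cpu is given half the group's shares, matching the
 * half of the group's load that lives here. The result is then clamped to
 * [MIN_SHARES, tg->shares].
 */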
 776
 777static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 778{
 779        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
 780                update_cfs_load(cfs_rq, 0);
 781                update_cfs_shares(cfs_rq);
 782        }
 783}
 784# else /* CONFIG_SMP */
 785static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 786{
 787}
 788
 789static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 790{
 791        return tg->shares;
 792}
 793
 794static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 795{
 796}
 797# endif /* CONFIG_SMP */
 798static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 799                            unsigned long weight)
 800{
 801        if (se->on_rq) {
 802                /* commit outstanding execution time */
 803                if (cfs_rq->curr == se)
 804                        update_curr(cfs_rq);
 805                account_entity_dequeue(cfs_rq, se);
 806        }
 807
 808        update_load_set(&se->load, weight);
 809
 810        if (se->on_rq)
 811                account_entity_enqueue(cfs_rq, se);
 812}
 813
 814static void update_cfs_shares(struct cfs_rq *cfs_rq)
 815{
 816        struct task_group *tg;
 817        struct sched_entity *se;
 818        long shares;
 819
 820        tg = cfs_rq->tg;
 821        se = tg->se[cpu_of(rq_of(cfs_rq))];
 822        if (!se)
 823                return;
 824#ifndef CONFIG_SMP
 825        if (likely(se->load.weight == tg->shares))
 826                return;
 827#endif
 828        shares = calc_cfs_shares(cfs_rq, tg);
 829
 830        reweight_entity(cfs_rq_of(se), se, shares);
 831}
 832#else /* CONFIG_FAIR_GROUP_SCHED */
 833static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 834{
 835}
 836
 837static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 838{
 839}
 840
 841static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
 842{
 843}
 844#endif /* CONFIG_FAIR_GROUP_SCHED */
 845
 846static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 847{
 848#ifdef CONFIG_SCHEDSTATS
 849        struct task_struct *tsk = NULL;
 850
 851        if (entity_is_task(se))
 852                tsk = task_of(se);
 853
 854        if (se->statistics.sleep_start) {
 855                u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
 856
 857                if ((s64)delta < 0)
 858                        delta = 0;
 859
 860                if (unlikely(delta > se->statistics.sleep_max))
 861                        se->statistics.sleep_max = delta;
 862
 863                se->statistics.sleep_start = 0;
 864                se->statistics.sum_sleep_runtime += delta;
 865
 866                if (tsk) {
 867                        account_scheduler_latency(tsk, delta >> 10, 1);
 868                        trace_sched_stat_sleep(tsk, delta);
 869                }
 870        }
 871        if (se->statistics.block_start) {
 872                u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
 873
 874                if ((s64)delta < 0)
 875                        delta = 0;
 876
 877                if (unlikely(delta > se->statistics.block_max))
 878                        se->statistics.block_max = delta;
 879
 880                se->statistics.block_start = 0;
 881                se->statistics.sum_sleep_runtime += delta;
 882
 883                if (tsk) {
 884                        if (tsk->in_iowait) {
 885                                se->statistics.iowait_sum += delta;
 886                                se->statistics.iowait_count++;
 887                                trace_sched_stat_iowait(tsk, delta);
 888                        }
 889
 890                        /*
 891                         * Blocking time is in units of nanosecs, so shift by
 892                         * 20 to get a milliseconds-range estimation of the
 893                         * amount of time that the task spent sleeping:
 894                         */
 895                        if (unlikely(prof_on == SLEEP_PROFILING)) {
 896                                profile_hits(SLEEP_PROFILING,
 897                                                (void *)get_wchan(tsk),
 898                                                delta >> 20);
 899                        }
 900                        account_scheduler_latency(tsk, delta >> 10, 0);
 901                }
 902        }
 903#endif
 904}
 905
 906static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 907{
 908#ifdef CONFIG_SCHED_DEBUG
 909        s64 d = se->vruntime - cfs_rq->min_vruntime;
 910
 911        if (d < 0)
 912                d = -d;
 913
 914        if (d > 3*sysctl_sched_latency)
 915                schedstat_inc(cfs_rq, nr_spread_over);
 916#endif
 917}
 918
 919static void
 920place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 921{
 922        u64 vruntime = cfs_rq->min_vruntime;
 923
 924        /*
 925         * The 'current' period is already promised to the current tasks,
 926         * however the extra weight of the new task will slow them down a
  927         * little; place the new task so that it fits in the slot that
 928         * stays open at the end.
 929         */
 930        if (initial && sched_feat(START_DEBIT))
 931                vruntime += sched_vslice(cfs_rq, se);
 932
 933        /* sleeps up to a single latency don't count. */
 934        if (!initial) {
 935                unsigned long thresh = sysctl_sched_latency;
 936
 937                /*
 938                 * Halve their sleep time's effect, to allow
 939                 * for a gentler effect of sleepers:
 940                 */
 941                if (sched_feat(GENTLE_FAIR_SLEEPERS))
 942                        thresh >>= 1;
 943
 944                vruntime -= thresh;
 945        }
 946
 947        /* ensure we never gain time by being placed backwards. */
 948        vruntime = max_vruntime(se->vruntime, vruntime);
 949
 950        se->vruntime = vruntime;
 951}
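
/*
 * Editor's worked example (not part of the original file), assuming
 * min_vruntime = 100ms and the 6ms default latency: a freshly forked task
 * with START_DEBIT set is placed one vslice past 100ms, so it cannot
 * immediately preempt the tasks already promised this period. A task waking
 * from sleep is offered 100ms - 3ms = 97ms (half the latency, with
 * GENTLE_FAIR_SLEEPERS), but the final max_vruntime() above means a task
 * that only dozed briefly and still has, say, vruntime = 99ms keeps it:
 * placement can never move an entity's vruntime backwards, it can only
 * grant at most the sleeper credit.
 */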
 952
 953static void
 954enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 955{
 956        /*
 957         * Update the normalized vruntime before updating min_vruntime
  958         * through calling update_curr().
 959         */
 960        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
 961                se->vruntime += cfs_rq->min_vruntime;
 962
 963        /*
 964         * Update run-time statistics of the 'current'.
 965         */
 966        update_curr(cfs_rq);
 967        update_cfs_load(cfs_rq, 0);
 968        account_entity_enqueue(cfs_rq, se);
 969        update_cfs_shares(cfs_rq);
 970
 971        if (flags & ENQUEUE_WAKEUP) {
 972                place_entity(cfs_rq, se, 0);
 973                enqueue_sleeper(cfs_rq, se);
 974        }
 975
 976        update_stats_enqueue(cfs_rq, se);
 977        check_spread(cfs_rq, se);
 978        if (se != cfs_rq->curr)
 979                __enqueue_entity(cfs_rq, se);
 980        se->on_rq = 1;
 981
 982        if (cfs_rq->nr_running == 1)
 983                list_add_leaf_cfs_rq(cfs_rq);
 984}
 985
 986static void __clear_buddies_last(struct sched_entity *se)
 987{
 988        for_each_sched_entity(se) {
 989                struct cfs_rq *cfs_rq = cfs_rq_of(se);
 990                if (cfs_rq->last == se)
 991                        cfs_rq->last = NULL;
 992                else
 993                        break;
 994        }
 995}
 996
 997static void __clear_buddies_next(struct sched_entity *se)
 998{
 999        for_each_sched_entity(se) {
1000                struct cfs_rq *cfs_rq = cfs_rq_of(se);
1001                if (cfs_rq->next == se)
1002                        cfs_rq->next = NULL;
1003                else
1004                        break;
1005        }
1006}
1007
1008static void __clear_buddies_skip(struct sched_entity *se)
1009{
1010        for_each_sched_entity(se) {
1011                struct cfs_rq *cfs_rq = cfs_rq_of(se);
1012                if (cfs_rq->skip == se)
1013                        cfs_rq->skip = NULL;
1014                else
1015                        break;
1016        }
1017}
1018
1019static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1020{
1021        if (cfs_rq->last == se)
1022                __clear_buddies_last(se);
1023
1024        if (cfs_rq->next == se)
1025                __clear_buddies_next(se);
1026
1027        if (cfs_rq->skip == se)
1028                __clear_buddies_skip(se);
1029}
1030
1031static void
1032dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1033{
1034        /*
1035         * Update run-time statistics of the 'current'.
1036         */
1037        update_curr(cfs_rq);
1038
1039        update_stats_dequeue(cfs_rq, se);
1040        if (flags & DEQUEUE_SLEEP) {
1041#ifdef CONFIG_SCHEDSTATS
1042                if (entity_is_task(se)) {
1043                        struct task_struct *tsk = task_of(se);
1044
1045                        if (tsk->state & TASK_INTERRUPTIBLE)
1046                                se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1047                        if (tsk->state & TASK_UNINTERRUPTIBLE)
1048                                se->statistics.block_start = rq_of(cfs_rq)->clock;
1049                }
1050#endif
1051        }
1052
1053        clear_buddies(cfs_rq, se);
1054
1055        if (se != cfs_rq->curr)
1056                __dequeue_entity(cfs_rq, se);
1057        se->on_rq = 0;
1058        update_cfs_load(cfs_rq, 0);
1059        account_entity_dequeue(cfs_rq, se);
1060
1061        /*
1062         * Normalize the entity after updating the min_vruntime because the
1063         * update can refer to the ->curr item and we need to reflect this
1064         * movement in our normalized position.
1065         */
1066        if (!(flags & DEQUEUE_SLEEP))
1067                se->vruntime -= cfs_rq->min_vruntime;
1068
1069        update_min_vruntime(cfs_rq);
1070        update_cfs_shares(cfs_rq);
1071}
1072
1073/*
1074 * Preempt the current task with a newly woken task if needed:
1075 */
1076static void
1077check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1078{
1079        unsigned long ideal_runtime, delta_exec;
1080
1081        ideal_runtime = sched_slice(cfs_rq, curr);
1082        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1083        if (delta_exec > ideal_runtime) {
1084                resched_task(rq_of(cfs_rq)->curr);
1085                /*
1086                 * The current task ran long enough, ensure it doesn't get
1087                 * re-elected due to buddy favours.
1088                 */
1089                clear_buddies(cfs_rq, curr);
1090                return;
1091        }
1092
1093        /*
1094         * Ensure that a task that missed wakeup preemption by a
1095         * narrow margin doesn't have to wait for a full slice.
1096         * This also mitigates buddy induced latencies under load.
1097         */
1098        if (!sched_feat(WAKEUP_PREEMPT))
1099                return;
1100
1101        if (delta_exec < sysctl_sched_min_granularity)
1102                return;
1103
1104        if (cfs_rq->nr_running > 1) {
1105                struct sched_entity *se = __pick_first_entity(cfs_rq);
1106                s64 delta = curr->vruntime - se->vruntime;
1107
1108                if (delta < 0)
1109                        return;
1110
1111                if (delta > ideal_runtime)
1112                        resched_task(rq_of(cfs_rq)->curr);
1113        }
1114}
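
/*
 * Editor's worked example (not part of the original file): if curr's
 * ideal_runtime is 2ms and it has run 2.5ms since it was picked, it is
 * rescheduled outright. If it has only run 1.2ms, it can still be preempted
 * when the leftmost entity's vruntime trails curr's by more than 2ms, so a
 * wakeup that narrowly missed preempting does not have to sit out the whole
 * slice; runs shorter than the minimum granularity are never cut short here.
 */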
1115
1116static void
1117set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1118{
1119        /* 'current' is not kept within the tree. */
1120        if (se->on_rq) {
1121                /*
 1122                 * Any task has to be enqueued before it gets to execute on
1123                 * a CPU. So account for the time it spent waiting on the
1124                 * runqueue.
1125                 */
1126                update_stats_wait_end(cfs_rq, se);
1127                __dequeue_entity(cfs_rq, se);
1128        }
1129
1130        update_stats_curr_start(cfs_rq, se);
1131        cfs_rq->curr = se;
1132#ifdef CONFIG_SCHEDSTATS
1133        /*
1134         * Track our maximum slice length, if the CPU's load is at
 1135         * least twice that of our own weight (i.e. don't track it
1136         * when there are only lesser-weight tasks around):
1137         */
1138        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1139                se->statistics.slice_max = max(se->statistics.slice_max,
1140                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
1141        }
1142#endif
1143        se->prev_sum_exec_runtime = se->sum_exec_runtime;
1144}
1145
1146static int
1147wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1148
1149/*
1150 * Pick the next process, keeping these things in mind, in this order:
1151 * 1) keep things fair between processes/task groups
1152 * 2) pick the "next" process, since someone really wants that to run
1153 * 3) pick the "last" process, for cache locality
1154 * 4) do not run the "skip" process, if something else is available
1155 */
1156static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1157{
1158        struct sched_entity *se = __pick_first_entity(cfs_rq);
1159        struct sched_entity *left = se;
1160
1161        /*
1162         * Avoid running the skip buddy, if running something else can
1163         * be done without getting too unfair.
1164         */
1165        if (cfs_rq->skip == se) {
1166                struct sched_entity *second = __pick_next_entity(se);
1167                if (second && wakeup_preempt_entity(second, left) < 1)
1168                        se = second;
1169        }
1170
1171        /*
1172         * Prefer last buddy, try to return the CPU to a preempted task.
1173         */
1174        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1175                se = cfs_rq->last;
1176
1177        /*
1178         * Someone really wants this to run. If it's not unfair, run it.
1179         */
1180        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1181                se = cfs_rq->next;
1182
1183        clear_buddies(cfs_rq, se);
1184
1185        return se;
1186}
1187
1188static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1189{
1190        /*
1191         * If still on the runqueue then deactivate_task()
1192         * was not called and update_curr() has to be done:
1193         */
1194        if (prev->on_rq)
1195                update_curr(cfs_rq);
1196
1197        check_spread(cfs_rq, prev);
1198        if (prev->on_rq) {
1199                update_stats_wait_start(cfs_rq, prev);
1200                /* Put 'current' back into the tree. */
1201                __enqueue_entity(cfs_rq, prev);
1202        }
1203        cfs_rq->curr = NULL;
1204}
1205
1206static void
1207entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1208{
1209        /*
1210         * Update run-time statistics of the 'current'.
1211         */
1212        update_curr(cfs_rq);
1213
1214        /*
1215         * Update share accounting for long-running entities.
1216         */
1217        update_entity_shares_tick(cfs_rq);
1218
1219#ifdef CONFIG_SCHED_HRTICK
1220        /*
1221         * queued ticks are scheduled to match the slice, so don't bother
1222         * validating it and just reschedule.
1223         */
1224        if (queued) {
1225                resched_task(rq_of(cfs_rq)->curr);
1226                return;
1227        }
1228        /*
1229         * don't let the period tick interfere with the hrtick preemption
1230         */
1231        if (!sched_feat(DOUBLE_TICK) &&
1232                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1233                return;
1234#endif
1235
1236        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
1237                check_preempt_tick(cfs_rq, curr);
1238}
1239
1240/**************************************************
1241 * CFS operations on tasks:
1242 */
1243
1244#ifdef CONFIG_SCHED_HRTICK
1245static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
1246{
1247        struct sched_entity *se = &p->se;
1248        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1249
1250        WARN_ON(task_rq(p) != rq);
1251
1252        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
1253                u64 slice = sched_slice(cfs_rq, se);
1254                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
1255                s64 delta = slice - ran;
1256
1257                if (delta < 0) {
1258                        if (rq->curr == p)
1259                                resched_task(p);
1260                        return;
1261                }
1262
1263                /*
1264                 * Don't schedule slices shorter than 10000ns, that just
1265                 * doesn't make sense. Rely on vruntime for fairness.
1266                 */
1267                if (rq->curr != p)
1268                        delta = max_t(s64, 10000LL, delta);
1269
1270                hrtick_start(rq, delta);
1271        }
1272}
1273
1274/*
1275 * called from enqueue/dequeue and updates the hrtick when the
1276 * current task is from our class and nr_running is low enough
1277 * to matter.
1278 */
1279static void hrtick_update(struct rq *rq)
1280{
1281        struct task_struct *curr = rq->curr;
1282
1283        if (curr->sched_class != &fair_sched_class)
1284                return;
1285
1286        if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
1287                hrtick_start_fair(rq, curr);
1288}
1289#else /* !CONFIG_SCHED_HRTICK */
1290static inline void
1291hrtick_start_fair(struct rq *rq, struct task_struct *p)
1292{
1293}
1294
1295static inline void hrtick_update(struct rq *rq)
1296{
1297}
1298#endif
1299
1300/*
1301 * The enqueue_task method is called before nr_running is
1302 * increased. Here we update the fair scheduling stats and
1303 * then put the task into the rbtree:
1304 */
1305static void
1306enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1307{
1308        struct cfs_rq *cfs_rq;
1309        struct sched_entity *se = &p->se;
1310
1311        for_each_sched_entity(se) {
1312                if (se->on_rq)
1313                        break;
1314                cfs_rq = cfs_rq_of(se);
1315                enqueue_entity(cfs_rq, se, flags);
1316                flags = ENQUEUE_WAKEUP;
1317        }
1318
1319        for_each_sched_entity(se) {
1320                cfs_rq = cfs_rq_of(se);
1321
1322                update_cfs_load(cfs_rq, 0);
1323                update_cfs_shares(cfs_rq);
1324        }
1325
1326        hrtick_update(rq);
1327}
1328
1329static void set_next_buddy(struct sched_entity *se);
1330
1331/*
1332 * The dequeue_task method is called before nr_running is
1333 * decreased. We remove the task from the rbtree and
1334 * update the fair scheduling stats:
1335 */
1336static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1337{
1338        struct cfs_rq *cfs_rq;
1339        struct sched_entity *se = &p->se;
1340        int task_sleep = flags & DEQUEUE_SLEEP;
1341
1342        for_each_sched_entity(se) {
1343                cfs_rq = cfs_rq_of(se);
1344                dequeue_entity(cfs_rq, se, flags);
1345
1346                /* Don't dequeue parent if it has other entities besides us */
1347                if (cfs_rq->load.weight) {
1348                        /*
1349                         * Bias pick_next to pick a task from this cfs_rq, as
1350                         * p is sleeping when it is within its sched_slice.
1351                         */
1352                        if (task_sleep && parent_entity(se))
1353                                set_next_buddy(parent_entity(se));
1354
1355                        /* avoid re-evaluating load for this entity */
1356                        se = parent_entity(se);
1357                        break;
1358                }
1359                flags |= DEQUEUE_SLEEP;
1360        }
1361
1362        for_each_sched_entity(se) {
1363                cfs_rq = cfs_rq_of(se);
1364
1365                update_cfs_load(cfs_rq, 0);
1366                update_cfs_shares(cfs_rq);
1367        }
1368
1369        hrtick_update(rq);
1370}
1371
1372#ifdef CONFIG_SMP
1373
1374static void task_waking_fair(struct task_struct *p)
1375{
1376        struct sched_entity *se = &p->se;
1377        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1378        u64 min_vruntime;
1379
1380#ifndef CONFIG_64BIT
1381        u64 min_vruntime_copy;
1382
1383        do {
1384                min_vruntime_copy = cfs_rq->min_vruntime_copy;
1385                smp_rmb();
1386                min_vruntime = cfs_rq->min_vruntime;
1387        } while (min_vruntime != min_vruntime_copy);
1388#else
1389        min_vruntime = cfs_rq->min_vruntime;
1390#endif
1391
1392        se->vruntime -= min_vruntime;
1393}
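
/*
 * Editor's note (illustrative, not part of the original file): on 32-bit the
 * copy/smp_rmb() retry loop above pairs with the smp_wmb() and
 * min_vruntime_copy update in update_min_vruntime(), so a waker on another
 * cpu can read a consistent 64-bit min_vruntime without holding the
 * runqueue lock; on 64-bit the load is naturally atomic and the loop is
 * compiled out.
 */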
1394
1395#ifdef CONFIG_FAIR_GROUP_SCHED
1396/*
1397 * effective_load() calculates the load change as seen from the root_task_group
1398 *
1399 * Adding load to a group doesn't make a group heavier, but can cause movement
1400 * of group shares between cpus. Assuming the shares were perfectly aligned one
1401 * can calculate the shift in shares.
1402 */
1403static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
1404{
1405        struct sched_entity *se = tg->se[cpu];
1406
1407        if (!tg->parent)
1408                return wl;
1409
1410        for_each_sched_entity(se) {
1411                long lw, w;
1412
1413                tg = se->my_q->tg;
1414                w = se->my_q->load.weight;
1415
1416                /* use this cpu's instantaneous contribution */
1417                lw = atomic_read(&tg->load_weight);
1418                lw -= se->my_q->load_contribution;
1419                lw += w + wg;
1420
1421                wl += w;
1422
1423                if (lw > 0 && wl < lw)
1424                        wl = (wl * tg->shares) / lw;
1425                else
1426                        wl = tg->shares;
1427
1428                /* zero point is MIN_SHARES */
1429                if (wl < MIN_SHARES)
1430                        wl = MIN_SHARES;
1431                wl -= se->load.weight;
1432                wg = 0;
1433        }
1434
1435        return wl;
1436}
1437
1438#else
1439
1440static inline unsigned long effective_load(struct task_group *tg, int cpu,
1441                unsigned long wl, unsigned long wg)
1442{
1443        return wl;
1444}
1445
1446#endif
1447
1448static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1449{
1450        s64 this_load, load;
1451        int idx, this_cpu, prev_cpu;
1452        unsigned long tl_per_task;
1453        struct task_group *tg;
1454        unsigned long weight;
1455        int balanced;
1456
1457        idx       = sd->wake_idx;
1458        this_cpu  = smp_processor_id();
1459        prev_cpu  = task_cpu(p);
1460        load      = source_load(prev_cpu, idx);
1461        this_load = target_load(this_cpu, idx);
1462
1463        /*
1464         * If sync wakeup then subtract the (maximum possible)
1465         * effect of the currently running task from the load
1466         * of the current CPU:
1467         */
1468        if (sync) {
1469                tg = task_group(current);
1470                weight = current->se.load.weight;
1471
1472                this_load += effective_load(tg, this_cpu, -weight, -weight);
1473                load += effective_load(tg, prev_cpu, 0, -weight);
1474        }
1475
1476        tg = task_group(p);
1477        weight = p->se.load.weight;
1478
1479        /*
1480         * In low-load situations, where prev_cpu is idle and this_cpu is idle
1481         * due to the sync cause above having dropped this_load to 0, we'll
1482         * always have an imbalance, but there's really nothing you can do
1483         * about that, so that's good too.
1484         *
1485         * Otherwise check if either cpus are near enough in load to allow this
1486         * task to be woken on this_cpu.
1487         */
1488        if (this_load > 0) {
1489                s64 this_eff_load, prev_eff_load;
1490
1491                this_eff_load = 100;
1492                this_eff_load *= power_of(prev_cpu);
1493                this_eff_load *= this_load +
1494                        effective_load(tg, this_cpu, weight, weight);
1495
1496                prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
1497                prev_eff_load *= power_of(this_cpu);
1498                prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
1499
1500                balanced = this_eff_load <= prev_eff_load;
1501        } else
1502                balanced = true;
1503
1504        /*
1505         * If the currently running task will sleep within
1506         * a reasonable amount of time then attract this newly
1507         * woken task:
1508         */
1509        if (sync && balanced)
1510                return 1;
1511
1512        schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
1513        tl_per_task = cpu_avg_load_per_task(this_cpu);
1514
1515        if (balanced ||
1516            (this_load <= load &&
1517             this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
1518                /*
1519                 * This domain has SD_WAKE_AFFINE and
1520                 * p is cache cold in this domain, and
1521                 * there is no bad imbalance.
1522                 */
1523                schedstat_inc(sd, ttwu_move_affine);
1524                schedstat_inc(p, se.statistics.nr_wakeups_affine);
1525
1526                return 1;
1527        }
1528        return 0;
1529}
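
/*
 * Editor's worked example (not part of the original file), assuming equal
 * cpu power on both cpus and a typical sd->imbalance_pct of 125: the test
 * above reduces to 100 * this_load' <= 112 * prev_load' (with the waking
 * task's effective load added on this_cpu and any sync adjustment already
 * applied), i.e. the waking cpu may be up to roughly 12% busier than
 * prev_cpu and the wakeup is still treated as balanced and pulled here.
 */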
1530
1531/*
1532 * find_idlest_group finds and returns the least busy CPU group within the
1533 * domain.
1534 */
1535static struct sched_group *
1536find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1537                  int this_cpu, int load_idx)
1538{
1539        struct sched_group *idlest = NULL, *group = sd->groups;
1540        unsigned long min_load = ULONG_MAX, this_load = 0;
1541        int imbalance = 100 + (sd->imbalance_pct-100)/2;
1542
1543        do {
1544                unsigned long load, avg_load;
1545                int local_group;
1546                int i;
1547
1548                /* Skip over this group if it has no CPUs allowed */
1549                if (!cpumask_intersects(sched_group_cpus(group),
1550                                        &p->cpus_allowed))
1551                        continue;
1552
1553                local_group = cpumask_test_cpu(this_cpu,
1554                                               sched_group_cpus(group));
1555
1556                /* Tally up the load of all CPUs in the group */
1557                avg_load = 0;
1558
1559                for_each_cpu(i, sched_group_cpus(group)) {
1560                        /* Bias balancing toward cpus of our domain */
1561                        if (local_group)
1562                                load = source_load(i, load_idx);
1563                        else
1564                                load = target_load(i, load_idx);
1565
1566                        avg_load += load;
1567                }
1568
1569                /* Adjust by relative CPU power of the group */
1570                avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
1571
1572                if (local_group) {
1573                        this_load = avg_load;
1574                } else if (avg_load < min_load) {
1575                        min_load = avg_load;
1576                        idlest = group;
1577                }
1578        } while (group = group->next, group != sd->groups);
1579
1580        if (!idlest || 100*this_load < imbalance*min_load)
1581                return NULL;
1582        return idlest;
1583}
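
/*
 * Editor's worked example (not part of the original file): with
 * sd->imbalance_pct = 125 the threshold above becomes 112, so the local
 * group is kept unless the idlest remote group is at least about 11% less
 * loaded (100 * this_load >= 112 * min_load); small differences never
 * justify leaving the local group.
 */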
1584
1585/*
1586 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1587 */
1588static int
1589find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1590{
1591        unsigned long load, min_load = ULONG_MAX;
1592        int idlest = -1;
1593        int i;
1594
1595        /* Traverse only the allowed CPUs */
1596        for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1597                load = weighted_cpuload(i);
1598
1599                if (load < min_load || (load == min_load && i == this_cpu)) {
1600                        min_load = load;
1601                        idlest = i;
1602                }
1603        }
1604
1605        return idlest;
1606}
1607
1608/*
1609 * Try and locate an idle CPU in the sched_domain.
1610 */
1611static int select_idle_sibling(struct task_struct *p, int target)
1612{
1613        int cpu = smp_processor_id();
1614        int prev_cpu = task_cpu(p);
1615        struct sched_domain *sd;
1616        int i;
1617
1618        /*
1619         * If the task is going to be woken-up on this cpu and if it is
1620         * already idle, then it is the right target.
1621         */
1622        if (target == cpu && idle_cpu(cpu))
1623                return cpu;
1624
1625        /*
1626         * If the task is going to be woken-up on the cpu where it previously
 1627         * ran and if it is currently idle, then it is the right target.
1628         */
1629        if (target == prev_cpu && idle_cpu(prev_cpu))
1630                return prev_cpu;
1631
1632        /*
 1633         * Otherwise, iterate the domains and find an eligible idle cpu.
1634         */
1635        rcu_read_lock();
1636        for_each_domain(target, sd) {
1637                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
1638                        break;
1639
1640                for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1641                        if (idle_cpu(i)) {
1642                                target = i;
1643                                break;
1644                        }
1645                }
1646
1647                /*
 1648                 * Let's stop looking for an idle sibling when we reach
 1649                 * the domain that spans both the current cpu and prev_cpu.
1650                 */
1651                if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
1652                    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
1653                        break;
1654        }
1655        rcu_read_unlock();
1656
1657        return target;
1658}
1659
1660/*
 1661 * select_task_rq_fair: balance the current task (running on cpu) in domains
1662 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1663 * SD_BALANCE_EXEC.
1664 *
1665 * Balance, ie. select the least loaded group.
1666 *
1667 * Returns the target CPU number, or the same CPU if no balancing is needed.
1668 *
1669 * preempt must be disabled.
1670 */
1671static int
1672select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1673{
1674        struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1675        int cpu = smp_processor_id();
1676        int prev_cpu = task_cpu(p);
1677        int new_cpu = cpu;
1678        int want_affine = 0;
1679        int want_sd = 1;
1680        int sync = wake_flags & WF_SYNC;
1681
1682        if (sd_flag & SD_BALANCE_WAKE) {
1683                if (cpumask_test_cpu(cpu, &p->cpus_allowed))
1684                        want_affine = 1;
1685                new_cpu = prev_cpu;
1686        }
1687
1688        rcu_read_lock();
1689        for_each_domain(cpu, tmp) {
1690                if (!(tmp->flags & SD_LOAD_BALANCE))
1691                        continue;
1692
1693                /*
1694                 * If power savings logic is enabled for a domain, see if we
1695                 * are not overloaded, if so, don't balance wider.
1696                 */
1697                if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
1698                        unsigned long power = 0;
1699                        unsigned long nr_running = 0;
1700                        unsigned long capacity;
1701                        int i;
1702
1703                        for_each_cpu(i, sched_domain_span(tmp)) {
1704                                power += power_of(i);
1705                                nr_running += cpu_rq(i)->cfs.nr_running;
1706                        }
1707
1708                        capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
1709
1710                        if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1711                                nr_running /= 2;
1712
1713                        if (nr_running < capacity)
1714                                want_sd = 0;
1715                }
1716
1717                /*
1718                 * If both cpu and prev_cpu are part of this domain,
1719                 * cpu is a valid SD_WAKE_AFFINE target.
1720                 */
1721                if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1722                    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1723                        affine_sd = tmp;
1724                        want_affine = 0;
1725                }
1726
1727                if (!want_sd && !want_affine)
1728                        break;
1729
1730                if (!(tmp->flags & sd_flag))
1731                        continue;
1732
1733                if (want_sd)
1734                        sd = tmp;
1735        }
1736
1737        if (affine_sd) {
1738                if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
1739                        prev_cpu = cpu;
1740
1741                new_cpu = select_idle_sibling(p, prev_cpu);
1742                goto unlock;
1743        }
1744
1745        while (sd) {
1746                int load_idx = sd->forkexec_idx;
1747                struct sched_group *group;
1748                int weight;
1749
1750                if (!(sd->flags & sd_flag)) {
1751                        sd = sd->child;
1752                        continue;
1753                }
1754
1755                if (sd_flag & SD_BALANCE_WAKE)
1756                        load_idx = sd->wake_idx;
1757
1758                group = find_idlest_group(sd, p, cpu, load_idx);
1759                if (!group) {
1760                        sd = sd->child;
1761                        continue;
1762                }
1763
1764                new_cpu = find_idlest_cpu(group, p, cpu);
1765                if (new_cpu == -1 || new_cpu == cpu) {
1766                        /* Now try balancing at a lower domain level of cpu */
1767                        sd = sd->child;
1768                        continue;
1769                }
1770
1771                /* Now try balancing at a lower domain level of new_cpu */
1772                cpu = new_cpu;
1773                weight = sd->span_weight;
1774                sd = NULL;
1775                for_each_domain(cpu, tmp) {
1776                        if (weight <= tmp->span_weight)
1777                                break;
1778                        if (tmp->flags & sd_flag)
1779                                sd = tmp;
1780                }
1781                /* while loop will break here if sd == NULL */
1782        }
1783unlock:
1784        rcu_read_unlock();
1785
1786        return new_cpu;
1787}
1788#endif /* CONFIG_SMP */
1789
1790static unsigned long
1791wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1792{
1793        unsigned long gran = sysctl_sched_wakeup_granularity;
1794
1795        /*
1796         * Since it is curr that is running now, convert the gran from
1797         * real-time to virtual-time in its units.
1798         *
1799         * By using 'se' instead of 'curr' we penalize light tasks, so
1800         * they get preempted easier. That is, if 'se' < 'curr' then
1801         * the resulting gran will be larger, therefore penalizing the
1802         * lighter task; if OTOH 'se' > 'curr' then the resulting gran will
1803         * be smaller, again penalizing the lighter task.
1804         *
1805         * This is especially important for buddies when the leftmost
1806         * task is higher priority than the buddy.
1807         */
1808        return calc_delta_fair(gran, se);
1809}
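/*
 * Worked example (rough numbers, assuming the default nice-0 weight of 1024
 * from the prio_to_weight table and an unscaled 1ms wakeup granularity):
 *
 *   se at nice  0 (weight 1024):  gran ~= 1ms * 1024/1024 = 1.00ms
 *   se at nice +5 (weight  335):  gran ~= 1ms * 1024/335  = 3.06ms
 *   se at nice -5 (weight 3121):  gran ~= 1ms * 1024/3121 = 0.33ms
 *
 * i.e. a light waking task must build up a ~3x larger vruntime lead before
 * it may preempt, while a heavy waking task preempts sooner, matching the
 * "penalize the lighter task" reasoning above.
 */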
1810
1811/*
1812 * Should 'se' preempt 'curr'.
1813 *
1814 *             |s1
1815 *        |s2
1816 *   |s3
1817 *         g
1818 *      |<--->|c
1819 *
1820 *  w(c, s1) = -1
1821 *  w(c, s2) =  0
1822 *  w(c, s3) =  1
1823 *
1824 */
1825static int
1826wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1827{
1828        s64 gran, vdiff = curr->vruntime - se->vruntime;
1829
1830        if (vdiff <= 0)
1831                return -1;
1832
1833        gran = wakeup_gran(curr, se);
1834        if (vdiff > gran)
1835                return 1;
1836
1837        return 0;
1838}
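/*
 * Numeric version of the picture above (illustrative values): with a
 * virtual-time gran of 1ms,
 *
 *   se->vruntime 0.5ms *ahead* of curr  ->  vdiff <= 0        ->  -1  (s1)
 *   se->vruntime 0.5ms behind curr      ->  0 < vdiff <= gran ->   0  (s2)
 *   se->vruntime 2.0ms behind curr      ->  vdiff > gran      ->   1  (s3)
 *
 * Only the last case makes the caller preempt curr.
 */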
1839
1840static void set_last_buddy(struct sched_entity *se)
1841{
1842        if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
1843                return;
1844
1845        for_each_sched_entity(se)
1846                cfs_rq_of(se)->last = se;
1847}
1848
1849static void set_next_buddy(struct sched_entity *se)
1850{
1851        if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
1852                return;
1853
1854        for_each_sched_entity(se)
1855                cfs_rq_of(se)->next = se;
1856}
1857
1858static void set_skip_buddy(struct sched_entity *se)
1859{
1860        for_each_sched_entity(se)
1861                cfs_rq_of(se)->skip = se;
1862}
1863
1864/*
1865 * Preempt the current task with a newly woken task if needed:
1866 */
1867static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1868{
1869        struct task_struct *curr = rq->curr;
1870        struct sched_entity *se = &curr->se, *pse = &p->se;
1871        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1872        int scale = cfs_rq->nr_running >= sched_nr_latency;
1873        int next_buddy_marked = 0;
1874
1875        if (unlikely(se == pse))
1876                return;
1877
1878        if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
1879                set_next_buddy(pse);
1880                next_buddy_marked = 1;
1881        }
1882
1883        /*
1884         * We can come here with TIF_NEED_RESCHED already set from new task
1885         * wake up path.
1886         */
1887        if (test_tsk_need_resched(curr))
1888                return;
1889
1890        /* Idle tasks are by definition preempted by non-idle tasks. */
1891        if (unlikely(curr->policy == SCHED_IDLE) &&
1892            likely(p->policy != SCHED_IDLE))
1893                goto preempt;
1894
1895        /*
1896         * Batch and idle tasks do not preempt non-idle tasks (their preemption
1897         * is driven by the tick):
1898         */
1899        if (unlikely(p->policy != SCHED_NORMAL))
1900                return;
1901
1902
1903        if (!sched_feat(WAKEUP_PREEMPT))
1904                return;
1905
1906        find_matching_se(&se, &pse);
1907        update_curr(cfs_rq_of(se));
1908        BUG_ON(!pse);
1909        if (wakeup_preempt_entity(se, pse) == 1) {
1910                /*
1911                 * Bias pick_next to pick the sched entity that is
1912                 * triggering this preemption.
1913                 */
1914                if (!next_buddy_marked)
1915                        set_next_buddy(pse);
1916                goto preempt;
1917        }
1918
1919        return;
1920
1921preempt:
1922        resched_task(curr);
1923        /*
1924         * Only set the backward buddy when the current task is still
1925         * on the rq. This can happen when a wakeup gets interleaved
1926         * with schedule on the ->pre_schedule() or idle_balance()
1927         * point, either of which can drop the rq lock.
1928         *
1929         * Also, during early boot the idle thread is in the fair class,
1930         * for obvious reasons it's a bad idea to schedule back to it.
1931         */
1932        if (unlikely(!se->on_rq || curr == rq->idle))
1933                return;
1934
1935        if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1936                set_last_buddy(se);
1937}
1938
1939static struct task_struct *pick_next_task_fair(struct rq *rq)
1940{
1941        struct task_struct *p;
1942        struct cfs_rq *cfs_rq = &rq->cfs;
1943        struct sched_entity *se;
1944
1945        if (!cfs_rq->nr_running)
1946                return NULL;
1947
1948        do {
1949                se = pick_next_entity(cfs_rq);
1950                set_next_entity(cfs_rq, se);
1951                cfs_rq = group_cfs_rq(se);
1952        } while (cfs_rq);
1953
1954        p = task_of(se);
1955        hrtick_start_fair(rq, p);
1956
1957        return p;
1958}
1959
1960/*
1961 * Account for a descheduled task:
1962 */
1963static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1964{
1965        struct sched_entity *se = &prev->se;
1966        struct cfs_rq *cfs_rq;
1967
1968        for_each_sched_entity(se) {
1969                cfs_rq = cfs_rq_of(se);
1970                put_prev_entity(cfs_rq, se);
1971        }
1972}
1973
1974/*
1975 * sched_yield() is very simple
1976 *
1977 * The magic of dealing with the ->skip buddy is in pick_next_entity.
1978 */
1979static void yield_task_fair(struct rq *rq)
1980{
1981        struct task_struct *curr = rq->curr;
1982        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1983        struct sched_entity *se = &curr->se;
1984
1985        /*
1986         * Are we the only task in the tree?
1987         */
1988        if (unlikely(rq->nr_running == 1))
1989                return;
1990
1991        clear_buddies(cfs_rq, se);
1992
1993        if (curr->policy != SCHED_BATCH) {
1994                update_rq_clock(rq);
1995                /*
1996                 * Update run-time statistics of the 'current'.
1997                 */
1998                update_curr(cfs_rq);
1999        }
2000
2001        set_skip_buddy(se);
2002}
2003
2004static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
2005{
2006        struct sched_entity *se = &p->se;
2007
2008        if (!se->on_rq)
2009                return false;
2010
2011        /* Tell the scheduler that we'd really like pse to run next. */
2012        set_next_buddy(se);
2013
2014        yield_task_fair(rq);
2015
2016        return true;
2017}
2018
2019#ifdef CONFIG_SMP
2020/**************************************************
2021 * Fair scheduling class load-balancing methods:
2022 */
2023
2024/*
2025 * pull_task - move a task from a remote runqueue to the local runqueue.
2026 * Both runqueues must be locked.
2027 */
2028static void pull_task(struct rq *src_rq, struct task_struct *p,
2029                      struct rq *this_rq, int this_cpu)
2030{
2031        deactivate_task(src_rq, p, 0);
2032        set_task_cpu(p, this_cpu);
2033        activate_task(this_rq, p, 0);
2034        check_preempt_curr(this_rq, p, 0);
2035}
2036
2037/*
2038 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2039 */
2040static
2041int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2042                     struct sched_domain *sd, enum cpu_idle_type idle,
2043                     int *all_pinned)
2044{
2045        int tsk_cache_hot = 0;
2046        /*
2047         * We do not migrate tasks that are:
2048         * 1) running (obviously), or
2049         * 2) cannot be migrated to this CPU due to cpus_allowed, or
2050         * 3) are cache-hot on their current CPU.
2051         */
2052        if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
2053                schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
2054                return 0;
2055        }
2056        *all_pinned = 0;
2057
2058        if (task_running(rq, p)) {
2059                schedstat_inc(p, se.statistics.nr_failed_migrations_running);
2060                return 0;
2061        }
2062
2063        /*
2064         * Aggressive migration if:
2065         * 1) task is cache cold, or
2066         * 2) too many balance attempts have failed.
2067         */
2068
2069        tsk_cache_hot = task_hot(p, rq->clock_task, sd);
2070        if (!tsk_cache_hot ||
2071                sd->nr_balance_failed > sd->cache_nice_tries) {
2072#ifdef CONFIG_SCHEDSTATS
2073                if (tsk_cache_hot) {
2074                        schedstat_inc(sd, lb_hot_gained[idle]);
2075                        schedstat_inc(p, se.statistics.nr_forced_migrations);
2076                }
2077#endif
2078                return 1;
2079        }
2080
2081        if (tsk_cache_hot) {
2082                schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
2083                return 0;
2084        }
2085        return 1;
2086}
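/*
 * Illustration (assuming the default sysctl_sched_migration_cost of 0.5ms;
 * cache_nice_tries is a per-domain tunable): a task that last ran 200us ago
 * on the busiest cpu is typically still reported cache-hot by task_hot()
 * and is skipped, unless this domain has already failed more than
 * sd->cache_nice_tries balance attempts, in which case it is migrated
 * anyway and counted in nr_forced_migrations.
 */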
2087
2088/*
2089 * move_one_task tries to move exactly one task from busiest to this_rq, as
2090 * part of active balancing operations within "domain".
2091 * Returns 1 if successful and 0 otherwise.
2092 *
2093 * Called with both runqueues locked.
2094 */
2095static int
2096move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2097              struct sched_domain *sd, enum cpu_idle_type idle)
2098{
2099        struct task_struct *p, *n;
2100        struct cfs_rq *cfs_rq;
2101        int pinned = 0;
2102
2103        for_each_leaf_cfs_rq(busiest, cfs_rq) {
2104                list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
2105
2106                        if (!can_migrate_task(p, busiest, this_cpu,
2107                                                sd, idle, &pinned))
2108                                continue;
2109
2110                        pull_task(busiest, p, this_rq, this_cpu);
2111                        /*
2112                         * Right now, this is only the second place pull_task()
2113                         * is called, so we can safely collect pull_task()
2114                         * stats here rather than inside pull_task().
2115                         */
2116                        schedstat_inc(sd, lb_gained[idle]);
2117                        return 1;
2118                }
2119        }
2120
2121        return 0;
2122}
2123
2124static unsigned long
2125balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2126              unsigned long max_load_move, struct sched_domain *sd,
2127              enum cpu_idle_type idle, int *all_pinned,
2128              struct cfs_rq *busiest_cfs_rq)
2129{
2130        int loops = 0, pulled = 0;
2131        long rem_load_move = max_load_move;
2132        struct task_struct *p, *n;
2133
2134        if (max_load_move == 0)
2135                goto out;
2136
2137        list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
2138                if (loops++ > sysctl_sched_nr_migrate)
2139                        break;
2140
2141                if ((p->se.load.weight >> 1) > rem_load_move ||
2142                    !can_migrate_task(p, busiest, this_cpu, sd, idle,
2143                                      all_pinned))
2144                        continue;
2145
2146                pull_task(busiest, p, this_rq, this_cpu);
2147                pulled++;
2148                rem_load_move -= p->se.load.weight;
2149
2150#ifdef CONFIG_PREEMPT
2151                /*
2152                 * NEWIDLE balancing is a source of latency, so preemptible
2153                 * kernels will stop after the first task is pulled to minimize
2154                 * the critical section.
2155                 */
2156                if (idle == CPU_NEWLY_IDLE)
2157                        break;
2158#endif
2159
2160                /*
2161                 * We only want to steal up to the prescribed amount of
2162                 * weighted load.
2163                 */
2164                if (rem_load_move <= 0)
2165                        break;
2166        }
2167out:
2168        /*
2169         * Right now, this is one of only two places pull_task() is called,
2170         * so we can safely collect pull_task() stats here rather than
2171         * inside pull_task().
2172         */
2173        schedstat_add(sd, lb_gained[idle], pulled);
2174
2175        return max_load_move - rem_load_move;
2176}
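/*
 * Worked example of the per-task filter above (illustrative weights): with
 * rem_load_move = 1024 (roughly one nice-0 task's weighted load), a nice -5
 * task of weight 3121 is skipped because 3121 >> 1 = 1560 exceeds the
 * remaining budget, while a nice-0 task of weight 1024 is pulled and the
 * loop then stops because rem_load_move has dropped to 0, all within the
 * sysctl_sched_nr_migrate iteration limit.
 */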
2177
2178#ifdef CONFIG_FAIR_GROUP_SCHED
2179/*
2180 * update tg->load_weight by folding this cpu's load_avg
2181 */
2182static int update_shares_cpu(struct task_group *tg, int cpu)
2183{
2184        struct cfs_rq *cfs_rq;
2185        unsigned long flags;
2186        struct rq *rq;
2187
2188        if (!tg->se[cpu])
2189                return 0;
2190
2191        rq = cpu_rq(cpu);
2192        cfs_rq = tg->cfs_rq[cpu];
2193
2194        raw_spin_lock_irqsave(&rq->lock, flags);
2195
2196        update_rq_clock(rq);
2197        update_cfs_load(cfs_rq, 1);
2198
2199        /*
2200         * We need to update shares after updating tg->load_weight in
2201         * order to adjust the weight of groups with long running tasks.
2202         */
2203        update_cfs_shares(cfs_rq);
2204
2205        raw_spin_unlock_irqrestore(&rq->lock, flags);
2206
2207        return 0;
2208}
2209
2210static void update_shares(int cpu)
2211{
2212        struct cfs_rq *cfs_rq;
2213        struct rq *rq = cpu_rq(cpu);
2214
2215        rcu_read_lock();
2216        /*
2217         * Iterates the task_group tree in a bottom up fashion, see
2218         * list_add_leaf_cfs_rq() for details.
2219         */
2220        for_each_leaf_cfs_rq(rq, cfs_rq)
2221                update_shares_cpu(cfs_rq->tg, cpu);
2222        rcu_read_unlock();
2223}
2224
2225/*
2226 * Compute the cpu's hierarchical load factor for each task group.
2227 * This needs to be done in a top-down fashion because the load of a child
2228 * group is a fraction of its parent's load.
2229 */
2230static int tg_load_down(struct task_group *tg, void *data)
2231{
2232        unsigned long load;
2233        long cpu = (long)data;
2234
2235        if (!tg->parent) {
2236                load = cpu_rq(cpu)->load.weight;
2237        } else {
2238                load = tg->parent->cfs_rq[cpu]->h_load;
2239                load *= tg->se[cpu]->load.weight;
2240                load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
2241        }
2242
2243        tg->cfs_rq[cpu]->h_load = load;
2244
2245        return 0;
2246}
2247
2248static void update_h_load(long cpu)
2249{
2250        walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
2251}
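/*
 * Worked example of the top-down factor (made-up weights): suppose the root
 * cfs_rq on this cpu has load.weight 2048 - one nice-0 task (1024) plus
 * group A's entity at weight 1024 - and rq->load.weight is likewise 2048.
 * The root level then gets h_load 2048 and group A gets
 * h_load = 2048 * 1024 / (2048 + 1) ~= 1023, i.e. roughly half of the cpu's
 * hierarchical load, as expected; a child group holding all of A's weight
 * would in turn inherit approximately the same h_load.
 */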
2252
2253static unsigned long
2254load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2255                  unsigned long max_load_move,
2256                  struct sched_domain *sd, enum cpu_idle_type idle,
2257                  int *all_pinned)
2258{
2259        long rem_load_move = max_load_move;
2260        struct cfs_rq *busiest_cfs_rq;
2261
2262        rcu_read_lock();
2263        update_h_load(cpu_of(busiest));
2264
2265        for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
2266                unsigned long busiest_h_load = busiest_cfs_rq->h_load;
2267                unsigned long busiest_weight = busiest_cfs_rq->load.weight;
2268                u64 rem_load, moved_load;
2269
2270                /*
2271                 * empty group
2272                 */
2273                if (!busiest_cfs_rq->task_weight)
2274                        continue;
2275
2276                rem_load = (u64)rem_load_move * busiest_weight;
2277                rem_load = div_u64(rem_load, busiest_h_load + 1);
2278
2279                moved_load = balance_tasks(this_rq, this_cpu, busiest,
2280                                rem_load, sd, idle, all_pinned,
2281                                busiest_cfs_rq);
2282
2283                if (!moved_load)
2284                        continue;
2285
2286                moved_load *= busiest_h_load;
2287                moved_load = div_u64(moved_load, busiest_weight + 1);
2288
2289                rem_load_move -= moved_load;
2290                if (rem_load_move < 0)
2291                        break;
2292        }
2293        rcu_read_unlock();
2294
2295        return max_load_move - rem_load_move;
2296}
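/*
 * The two div_u64()s above only change units (illustrative numbers): a
 * hierarchy-wide budget of rem_load_move = 512 against a group cfs_rq with
 * h_load 1024 and local load.weight 2048 becomes a local budget of roughly
 * 512 * 2048 / 1025 ~= 1023 for balance_tasks(); whatever local weight was
 * actually moved is then scaled back by h_load / weight before being
 * charged to rem_load_move, so the caller keeps accounting in hierarchical
 * load units throughout.
 */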
2297#else
2298static inline void update_shares(int cpu)
2299{
2300}
2301
2302static unsigned long
2303load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2304                  unsigned long max_load_move,
2305                  struct sched_domain *sd, enum cpu_idle_type idle,
2306                  int *all_pinned)
2307{
2308        return balance_tasks(this_rq, this_cpu, busiest,
2309                        max_load_move, sd, idle, all_pinned,
2310                        &busiest->cfs);
2311}
2312#endif
2313
2314/*
2315 * move_tasks tries to move up to max_load_move weighted load from busiest to
2316 * this_rq, as part of a balancing operation within domain "sd".
2317 * Returns 1 if successful and 0 otherwise.
2318 *
2319 * Called with both runqueues locked.
2320 */
2321static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2322                      unsigned long max_load_move,
2323                      struct sched_domain *sd, enum cpu_idle_type idle,
2324                      int *all_pinned)
2325{
2326        unsigned long total_load_moved = 0, load_moved;
2327
2328        do {
2329                load_moved = load_balance_fair(this_rq, this_cpu, busiest,
2330                                max_load_move - total_load_moved,
2331                                sd, idle, all_pinned);
2332
2333                total_load_moved += load_moved;
2334
2335#ifdef CONFIG_PREEMPT
2336                /*
2337                 * NEWIDLE balancing is a source of latency, so preemptible
2338                 * kernels will stop after the first task is pulled to minimize
2339                 * the critical section.
2340                 */
2341                if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2342                        break;
2343
2344                if (raw_spin_is_contended(&this_rq->lock) ||
2345                                raw_spin_is_contended(&busiest->lock))
2346                        break;
2347#endif
2348        } while (load_moved && max_load_move > total_load_moved);
2349
2350        return total_load_moved > 0;
2351}
2352
2353/********** Helpers for find_busiest_group ************************/
2354/*
2355 * sd_lb_stats - Structure to store the statistics of a sched_domain
2356 *              during load balancing.
2357 */
2358struct sd_lb_stats {
2359        struct sched_group *busiest; /* Busiest group in this sd */
2360        struct sched_group *this;  /* Local group in this sd */
2361        unsigned long total_load;  /* Total load of all groups in sd */
2362        unsigned long total_pwr;   /* Total power of all groups in sd */
2363        unsigned long avg_load;    /* Average load across all groups in sd */
2364
2365        /** Statistics of this group */
2366        unsigned long this_load;
2367        unsigned long this_load_per_task;
2368        unsigned long this_nr_running;
2369        unsigned long this_has_capacity;
2370        unsigned int  this_idle_cpus;
2371
2372        /* Statistics of the busiest group */
2373        unsigned int  busiest_idle_cpus;
2374        unsigned long max_load;
2375        unsigned long busiest_load_per_task;
2376        unsigned long busiest_nr_running;
2377        unsigned long busiest_group_capacity;
2378        unsigned long busiest_has_capacity;
2379        unsigned int  busiest_group_weight;
2380
2381        int group_imb; /* Is there imbalance in this sd */
2382#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2383        int power_savings_balance; /* Is powersave balance needed for this sd */
2384        struct sched_group *group_min; /* Least loaded group in sd */
2385        struct sched_group *group_leader; /* Group which relieves group_min */
2386        unsigned long min_load_per_task; /* load_per_task in group_min */
2387        unsigned long leader_nr_running; /* Nr running of group_leader */
2388        unsigned long min_nr_running; /* Nr running of group_min */
2389#endif
2390};
2391
2392/*
2393 * sg_lb_stats - stats of a sched_group required for load_balancing
2394 */
2395struct sg_lb_stats {
2396        unsigned long avg_load; /*Avg load across the CPUs of the group */
2397        unsigned long group_load; /* Total load over the CPUs of the group */
2398        unsigned long sum_nr_running; /* Nr tasks running in the group */
2399        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2400        unsigned long group_capacity;
2401        unsigned long idle_cpus;
2402        unsigned long group_weight;
2403        int group_imb; /* Is there an imbalance in the group ? */
2404        int group_has_capacity; /* Is there extra capacity in the group? */
2405};
2406
2407/**
2408 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2409 * @group: The group whose first cpu is to be returned.
2410 */
2411static inline unsigned int group_first_cpu(struct sched_group *group)
2412{
2413        return cpumask_first(sched_group_cpus(group));
2414}
2415
2416/**
2417 * get_sd_load_idx - Obtain the load index for a given sched domain.
2418 * @sd: The sched_domain whose load_idx is to be obtained.
2419 * @idle: The idle status of the CPU whose sd load_idx is being obtained.
2420 */
2421static inline int get_sd_load_idx(struct sched_domain *sd,
2422                                        enum cpu_idle_type idle)
2423{
2424        int load_idx;
2425
2426        switch (idle) {
2427        case CPU_NOT_IDLE:
2428                load_idx = sd->busy_idx;
2429                break;
2430
2431        case CPU_NEWLY_IDLE:
2432                load_idx = sd->newidle_idx;
2433                break;
2434        default:
2435                load_idx = sd->idle_idx;
2436                break;
2437        }
2438
2439        return load_idx;
2440}
2441
2442
2443#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2444/**
2445 * init_sd_power_savings_stats - Initialize power savings statistics for
2446 * the given sched_domain, during load balancing.
2447 *
2448 * @sd: Sched domain whose power-savings statistics are to be initialized.
2449 * @sds: Variable containing the statistics for sd.
2450 * @idle: Idle status of the CPU at which we're performing load-balancing.
2451 */
2452static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2453        struct sd_lb_stats *sds, enum cpu_idle_type idle)
2454{
2455        /*
2456         * Busy processors will not participate in power savings
2457         * balance.
2458         */
2459        if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2460                sds->power_savings_balance = 0;
2461        else {
2462                sds->power_savings_balance = 1;
2463                sds->min_nr_running = ULONG_MAX;
2464                sds->leader_nr_running = 0;
2465        }
2466}
2467
2468/**
2469 * update_sd_power_savings_stats - Update the power saving stats for a
2470 * sched_domain while performing load balancing.
2471 *
2472 * @group: sched_group belonging to the sched_domain under consideration.
2473 * @sds: Variable containing the statistics of the sched_domain
2474 * @local_group: Does group contain the CPU for which we're performing
2475 *              load balancing ?
2476 * @sgs: Variable containing the statistics of the group.
2477 */
2478static inline void update_sd_power_savings_stats(struct sched_group *group,
2479        struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2480{
2481
2482        if (!sds->power_savings_balance)
2483                return;
2484
2485        /*
2486         * If the local group is idle or completely loaded
2487         * no need to do power savings balance at this domain
2488         */
2489        if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2490                                !sds->this_nr_running))
2491                sds->power_savings_balance = 0;
2492
2493        /*
2494         * If a group is already running at full capacity or idle,
2495         * don't include that group in power savings calculations
2496         */
2497        if (!sds->power_savings_balance ||
2498                sgs->sum_nr_running >= sgs->group_capacity ||
2499                !sgs->sum_nr_running)
2500                return;
2501
2502        /*
2503         * Calculate the group which has the least non-idle load.
2504         * This is the group from where we need to pick up the load
2505         * for saving power
2506         */
2507        if ((sgs->sum_nr_running < sds->min_nr_running) ||
2508            (sgs->sum_nr_running == sds->min_nr_running &&
2509             group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2510                sds->group_min = group;
2511                sds->min_nr_running = sgs->sum_nr_running;
2512                sds->min_load_per_task = sgs->sum_weighted_load /
2513                                                sgs->sum_nr_running;
2514        }
2515
2516        /*
2517         * Calculate the group which is nearly at its capacity but still
2518         * has some room to pick up load from other groups and thereby
2519         * save more power.
2520         */
2521        if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2522                return;
2523
2524        if (sgs->sum_nr_running > sds->leader_nr_running ||
2525            (sgs->sum_nr_running == sds->leader_nr_running &&
2526             group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2527                sds->group_leader = group;
2528                sds->leader_nr_running = sgs->sum_nr_running;
2529        }
2530}
2531
2532/**
2533 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2534 * @sds: Variable containing the statistics of the sched_domain
2535 *      under consideration.
2536 * @this_cpu: Cpu at which we're currently performing load-balancing.
2537 * @imbalance: Variable to store the imbalance.
2538 *
2539 * Description:
2540 * Check if we have potential to perform some power-savings balance.
2541 * If yes, set the busiest group to be the least loaded group in the
2542 * sched_domain, so that its CPUs can be put to idle.
2543 *
2544 * Returns 1 if there is potential to perform power-savings balance.
2545 * Else returns 0.
2546 */
2547static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2548                                        int this_cpu, unsigned long *imbalance)
2549{
2550        if (!sds->power_savings_balance)
2551                return 0;
2552
2553        if (sds->this != sds->group_leader ||
2554                        sds->group_leader == sds->group_min)
2555                return 0;
2556
2557        *imbalance = sds->min_load_per_task;
2558        sds->busiest = sds->group_min;
2559
2560        return 1;
2561
2562}
2563#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2564static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2565        struct sd_lb_stats *sds, enum cpu_idle_type idle)
2566{
2567        return;
2568}
2569
2570static inline void update_sd_power_savings_stats(struct sched_group *group,
2571        struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2572{
2573        return;
2574}
2575
2576static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2577                                        int this_cpu, unsigned long *imbalance)
2578{
2579        return 0;
2580}
2581#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2582
2583
2584unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2585{
2586        return SCHED_POWER_SCALE;
2587}
2588
2589unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2590{
2591        return default_scale_freq_power(sd, cpu);
2592}
2593
2594unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2595{
2596        unsigned long weight = sd->span_weight;
2597        unsigned long smt_gain = sd->smt_gain;
2598
2599        smt_gain /= weight;
2600
2601        return smt_gain;
2602}
2603
2604unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2605{
2606        return default_scale_smt_power(sd, cpu);
2607}
2608
2609unsigned long scale_rt_power(int cpu)
2610{
2611        struct rq *rq = cpu_rq(cpu);
2612        u64 total, available;
2613
2614        total = sched_avg_period() + (rq->clock - rq->age_stamp);
2615
2616        if (unlikely(total < rq->rt_avg)) {
2617                /* Ensures that power won't end up being negative */
2618                available = 0;
2619        } else {
2620                available = total - rq->rt_avg;
2621        }
2622
2623        if (unlikely((s64)total < SCHED_POWER_SCALE))
2624                total = SCHED_POWER_SCALE;
2625
2626        total >>= SCHED_POWER_SHIFT;
2627
2628        return div_u64(available, total);
2629}
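/*
 * Example with round numbers: if rt/irq time (rq->rt_avg) accounts for a
 * quarter of the averaging window, available is 75% of total; after total
 * is shifted down by SCHED_POWER_SHIFT the division returns roughly
 * 0.75 * SCHED_POWER_SCALE = 768, i.e. only about three quarters of this
 * cpu's nominal power remains for CFS tasks.
 */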
2630
2631static void update_cpu_power(struct sched_domain *sd, int cpu)
2632{
2633        unsigned long weight = sd->span_weight;
2634        unsigned long power = SCHED_POWER_SCALE;
2635        struct sched_group *sdg = sd->groups;
2636
2637        if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2638                if (sched_feat(ARCH_POWER))
2639                        power *= arch_scale_smt_power(sd, cpu);
2640                else
2641                        power *= default_scale_smt_power(sd, cpu);
2642
2643                power >>= SCHED_POWER_SHIFT;
2644        }
2645
2646        sdg->sgp->power_orig = power;
2647
2648        if (sched_feat(ARCH_POWER))
2649                power *= arch_scale_freq_power(sd, cpu);
2650        else
2651                power *= default_scale_freq_power(sd, cpu);
2652
2653        power >>= SCHED_POWER_SHIFT;
2654
2655        power *= scale_rt_power(cpu);
2656        power >>= SCHED_POWER_SHIFT;
2657
2658        if (!power)
2659                power = 1;
2660
2661        cpu_rq(cpu)->cpu_power = power;
2662        sdg->sgp->power = power;
2663}
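/*
 * Putting the factors together (assumed defaults: SCHED_POWER_SCALE of
 * 1024, an smt_gain of ~1178 on a 2-thread SMT domain, no arch frequency
 * scaling): each sibling starts at 1024, is scaled to ~1178/2 = 589 by the
 * SMT factor, is left untouched by the default frequency factor, and with
 * a 768/1024 rt_avg factor ends up with cpu_power ~= 441.
 */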
2664
2665static void update_group_power(struct sched_domain *sd, int cpu)
2666{
2667        struct sched_domain *child = sd->child;
2668        struct sched_group *group, *sdg = sd->groups;
2669        unsigned long power;
2670
2671        if (!child) {
2672                update_cpu_power(sd, cpu);
2673                return;
2674        }
2675
2676        power = 0;
2677
2678        group = child->groups;
2679        do {
2680                power += group->sgp->power;
2681                group = group->next;
2682        } while (group != child->groups);
2683
2684        sdg->sgp->power = power;
2685}
2686
2687/*
2688 * Try to fix up capacity for tiny siblings; this is needed when
2689 * things like SD_ASYM_PACKING need f_b_g to select another sibling
2690 * which on its own isn't powerful enough.
2691 *
2692 * See update_sd_pick_busiest() and check_asym_packing().
2693 */
2694static inline int
2695fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
2696{
2697        /*
2698         * Only siblings can have significantly less than SCHED_POWER_SCALE
2699         */
2700        if (!(sd->flags & SD_SHARE_CPUPOWER))
2701                return 0;
2702
2703        /*
2704         * If ~90% of the cpu_power is still there, we're good.
2705         */
2706        if (group->sgp->power * 32 > group->sgp->power_orig * 29)
2707                return 1;
2708
2709        return 0;
2710}
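/*
 * The 32/29 test above simply means "more than ~90.6% of power_orig is
 * left". Example (values carried over from the SMT sketch above): a
 * sibling with power_orig 589 still counts as one unit of capacity as long
 * as its current power exceeds 589 * 29/32 ~= 534; below that the group
 * keeps the rounded-down capacity of 0.
 */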
2711
2712/**
2713 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2714 * @sd: The sched_domain whose statistics are to be updated.
2715 * @group: sched_group whose statistics are to be updated.
2716 * @this_cpu: Cpu for which load balance is currently performed.
2717 * @idle: Idle status of this_cpu
2718 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2719 * @local_group: Does group contain this_cpu.
2720 * @cpus: Set of cpus considered for load balancing.
2721 * @balance: Should we balance.
2722 * @sgs: variable to hold the statistics for this group.
2723 */
2724static inline void update_sg_lb_stats(struct sched_domain *sd,
2725                        struct sched_group *group, int this_cpu,
2726                        enum cpu_idle_type idle, int load_idx,
2727                        int local_group, const struct cpumask *cpus,
2728                        int *balance, struct sg_lb_stats *sgs)
2729{
2730        unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
2731        int i;
2732        unsigned int balance_cpu = -1, first_idle_cpu = 0;
2733        unsigned long avg_load_per_task = 0;
2734
2735        if (local_group)
2736                balance_cpu = group_first_cpu(group);
2737
2738        /* Tally up the load of all CPUs in the group */
2739        max_cpu_load = 0;
2740        min_cpu_load = ~0UL;
2741        max_nr_running = 0;
2742
2743        for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2744                struct rq *rq = cpu_rq(i);
2745
2746                /* Bias balancing toward cpus of our domain */
2747                if (local_group) {
2748                        if (idle_cpu(i) && !first_idle_cpu) {
2749                                first_idle_cpu = 1;
2750                                balance_cpu = i;
2751                        }
2752
2753                        load = target_load(i, load_idx);
2754                } else {
2755                        load = source_load(i, load_idx);
2756                        if (load > max_cpu_load) {
2757                                max_cpu_load = load;
2758                                max_nr_running = rq->nr_running;
2759                        }
2760                        if (min_cpu_load > load)
2761                                min_cpu_load = load;
2762                }
2763
2764                sgs->group_load += load;
2765                sgs->sum_nr_running += rq->nr_running;
2766                sgs->sum_weighted_load += weighted_cpuload(i);
2767                if (idle_cpu(i))
2768                        sgs->idle_cpus++;
2769        }
2770
2771        /*
2772         * The first idle cpu or the first cpu (busiest) in this sched group
2773         * is eligible for doing load balancing at this and above
2774         * domains. In the newly idle case, we will allow all the cpus
2775         * to do the newly idle load balance.
2776         */
2777        if (idle != CPU_NEWLY_IDLE && local_group) {
2778                if (balance_cpu != this_cpu) {
2779                        *balance = 0;
2780                        return;
2781                }
2782                update_group_power(sd, this_cpu);
2783        }
2784
2785        /* Adjust by relative CPU power of the group */
2786        sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
2787
2788        /*
2789         * Consider the group unbalanced when the imbalance is larger
2790         * than the average weight of a task.
2791         *
2792         * APZ: with cgroup the avg task weight can vary wildly and
2793         *      might not be a suitable number - should we keep a
2794         *      normalized nr_running number somewhere that negates
2795         *      the hierarchy?
2796         */
2797        if (sgs->sum_nr_running)
2798                avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
2799
2800        if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
2801                sgs->group_imb = 1;
2802
2803        sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
2804                                                SCHED_POWER_SCALE);
2805        if (!sgs->group_capacity)
2806                sgs->group_capacity = fix_small_capacity(sd, group);
2807        sgs->group_weight = group->group_weight;
2808
2809        if (sgs->group_capacity > sgs->sum_nr_running)
2810                sgs->group_has_capacity = 1;
2811}
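/*
 * Small numeric example of the two derived figures (illustrative loads): a
 * two-cpu group with sgp->power 2048 and group_load 3072 gets
 * avg_load = 3072 * 1024 / 2048 = 1536 and
 * group_capacity = DIV_ROUND_CLOSEST(2048, 1024) = 2; with
 * sum_nr_running = 3 the group has no spare capacity, while with
 * sum_nr_running = 1 group_has_capacity is set.
 */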
2812
2813/**
2814 * update_sd_pick_busiest - return 1 on busiest group
2815 * @sd: sched_domain whose statistics are to be checked
2816 * @sds: sched_domain statistics
2817 * @sg: sched_group candidate to be checked for being the busiest
2818 * @sgs: sched_group statistics
2819 * @this_cpu: the current cpu
2820 *
2821 * Determine if @sg is a busier group than the previously selected
2822 * busiest group.
2823 */
2824static bool update_sd_pick_busiest(struct sched_domain *sd,
2825                                   struct sd_lb_stats *sds,
2826                                   struct sched_group *sg,
2827                                   struct sg_lb_stats *sgs,
2828                                   int this_cpu)
2829{
2830        if (sgs->avg_load <= sds->max_load)
2831                return false;
2832
2833        if (sgs->sum_nr_running > sgs->group_capacity)
2834                return true;
2835
2836        if (sgs->group_imb)
2837                return true;
2838
2839        /*
2840         * ASYM_PACKING needs to move all the work to the lowest
2841         * numbered CPUs in the group, therefore mark all groups
2842         * higher than ourself as busy.
2843         */
2844        if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
2845            this_cpu < group_first_cpu(sg)) {
2846                if (!sds->busiest)
2847                        return true;
2848
2849                if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2850                        return true;
2851        }
2852
2853        return false;
2854}
2855
2856/**
2857 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
2858 * @sd: sched_domain whose statistics are to be updated.
2859 * @this_cpu: Cpu for which load balance is currently performed.
2860 * @idle: Idle status of this_cpu
2861 * @cpus: Set of cpus considered for load balancing.
2862 * @balance: Should we balance.
2863 * @sds: variable to hold the statistics for this sched_domain.
2864 */
2865static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2866                        enum cpu_idle_type idle, const struct cpumask *cpus,
2867                        int *balance, struct sd_lb_stats *sds)
2868{
2869        struct sched_domain *child = sd->child;
2870        struct sched_group *sg = sd->groups;
2871        struct sg_lb_stats sgs;
2872        int load_idx, prefer_sibling = 0;
2873
2874        if (child && child->flags & SD_PREFER_SIBLING)
2875                prefer_sibling = 1;
2876
2877        init_sd_power_savings_stats(sd, sds, idle);
2878        load_idx = get_sd_load_idx(sd, idle);
2879
2880        do {
2881                int local_group;
2882
2883                local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
2884                memset(&sgs, 0, sizeof(sgs));
2885                update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
2886                                local_group, cpus, balance, &sgs);
2887
2888                if (local_group && !(*balance))
2889                        return;
2890
2891                sds->total_load += sgs.group_load;
2892                sds->total_pwr += sg->sgp->power;
2893
2894                /*
2895                 * In case the child domain prefers tasks go to siblings
2896                 * first, lower the sg capacity to one so that we'll try
2897                 * and move all the excess tasks away. We lower the capacity
2898                 * of a group only if the local group has the capacity to fit
2899                 * these excess tasks, i.e. nr_running < group_capacity. The
2900                 * extra check prevents the case where you always pull from the
2901                 * heaviest group when it is already under-utilized (possible
2902                 * when a single large-weight task outweighs the other tasks
2903                 * on the system).
2903                 */
2904                if (prefer_sibling && !local_group && sds->this_has_capacity)
2905                        sgs.group_capacity = min(sgs.group_capacity, 1UL);
2906
2907                if (local_group) {
2908                        sds->this_load = sgs.avg_load;
2909                        sds->this = sg;
2910                        sds->this_nr_running = sgs.sum_nr_running;
2911                        sds->this_load_per_task = sgs.sum_weighted_load;
2912                        sds->this_has_capacity = sgs.group_has_capacity;
2913                        sds->this_idle_cpus = sgs.idle_cpus;
2914                } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
2915                        sds->max_load = sgs.avg_load;
2916                        sds->busiest = sg;
2917                        sds->busiest_nr_running = sgs.sum_nr_running;
2918                        sds->busiest_idle_cpus = sgs.idle_cpus;
2919                        sds->busiest_group_capacity = sgs.group_capacity;
2920                        sds->busiest_load_per_task = sgs.sum_weighted_load;
2921                        sds->busiest_has_capacity = sgs.group_has_capacity;
2922                        sds->busiest_group_weight = sgs.group_weight;
2923                        sds->group_imb = sgs.group_imb;
2924                }
2925
2926                update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2927                sg = sg->next;
2928        } while (sg != sd->groups);
2929}
2930
2931int __weak arch_sd_sibling_asym_packing(void)
2932{
2933       return 0*SD_ASYM_PACKING;
2934}
2935
2936/**
2937 * check_asym_packing - Check to see if the group is packed into the
2938 *                      sched domain.
2939 *
2940 * This is primarily intended to be used at the sibling level.  Some
2941 * cores like POWER7 prefer to use lower numbered SMT threads.  In the
2942 * case of POWER7, it can move to lower SMT modes only when higher
2943 * threads are idle.  When in lower SMT modes, the threads will
2944 * perform better since they share less core resources.  Hence when we
2945 * have idle threads, we want them to be the higher ones.
2946 *
2947 * This packing function is run on idle threads.  It checks to see if
2948 * the busiest CPU in this domain (core in the P7 case) has a higher
2949 * CPU number than the packing function is being run on.  Here we are
2950 * assuming a lower CPU number is equivalent to a lower SMT thread
2951 * number.
2952 *
2953 * Returns 1 when packing is required and a task should be moved to
2954 * this CPU.  The amount of the imbalance is returned in *imbalance.
2955 *
2956 * @sd: The sched_domain whose packing is to be checked.
2957 * @sds: Statistics of the sched_domain which is to be packed
2958 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2959 * @imbalance: returns amount of imbalance due to packing.
2960 */
2961static int check_asym_packing(struct sched_domain *sd,
2962                              struct sd_lb_stats *sds,
2963                              int this_cpu, unsigned long *imbalance)
2964{
2965        int busiest_cpu;
2966
2967        if (!(sd->flags & SD_ASYM_PACKING))
2968                return 0;
2969
2970        if (!sds->busiest)
2971                return 0;
2972
2973        busiest_cpu = group_first_cpu(sds->busiest);
2974        if (this_cpu > busiest_cpu)
2975                return 0;
2976
2977        *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
2978                                       SCHED_POWER_SCALE);
2979        return 1;
2980}
2981
2982/**
2983 * fix_small_imbalance - Calculate the minor imbalance that exists
2984 *                      amongst the groups of a sched_domain, during
2985 *                      load balancing.
2986 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2987 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2988 * @imbalance: Variable to store the imbalance.
2989 */
2990static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2991                                int this_cpu, unsigned long *imbalance)
2992{
2993        unsigned long tmp, pwr_now = 0, pwr_move = 0;
2994        unsigned int imbn = 2;
2995        unsigned long scaled_busy_load_per_task;
2996
2997        if (sds->this_nr_running) {
2998                sds->this_load_per_task /= sds->this_nr_running;
2999                if (sds->busiest_load_per_task >
3000                                sds->this_load_per_task)
3001                        imbn = 1;
3002        } else
3003                sds->this_load_per_task =
3004                        cpu_avg_load_per_task(this_cpu);
3005
3006        scaled_busy_load_per_task = sds->busiest_load_per_task
3007                                         * SCHED_POWER_SCALE;
3008        scaled_busy_load_per_task /= sds->busiest->sgp->power;
3009
3010        if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
3011                        (scaled_busy_load_per_task * imbn)) {
3012                *imbalance = sds->busiest_load_per_task;
3013                return;
3014        }
3015
3016        /*
3017         * OK, we don't have enough imbalance to justify moving tasks,
3018         * however we may be able to increase total CPU power used by
3019         * moving them.
3020         */
3021
3022        pwr_now += sds->busiest->sgp->power *
3023                        min(sds->busiest_load_per_task, sds->max_load);
3024        pwr_now += sds->this->sgp->power *
3025                        min(sds->this_load_per_task, sds->this_load);
3026        pwr_now /= SCHED_POWER_SCALE;
3027
3028        /* Amount of load we'd subtract */
3029        tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
3030                sds->busiest->sgp->power;
3031        if (sds->max_load > tmp)
3032                pwr_move += sds->busiest->sgp->power *
3033                        min(sds->busiest_load_per_task, sds->max_load - tmp);
3034
3035        /* Amount of load we'd add */
3036        if (sds->max_load * sds->busiest->sgp->power <
3037                sds->busiest_load_per_task * SCHED_POWER_SCALE)
3038                tmp = (sds->max_load * sds->busiest->sgp->power) /
3039                        sds->this->sgp->power;
3040        else
3041                tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
3042                        sds->this->sgp->power;
3043        pwr_move += sds->this->sgp->power *
3044                        min(sds->this_load_per_task, sds->this_load + tmp);
3045        pwr_move /= SCHED_POWER_SCALE;
3046
3047        /* Move if we gain throughput */
3048        if (pwr_move > pwr_now)
3049                *imbalance = sds->busiest_load_per_task;
3050}
3051
3052/**
3053 * calculate_imbalance - Calculate the amount of imbalance present within the
3054 *                       groups of a given sched_domain during load balance.
3055 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3056 * @this_cpu: Cpu for which currently load balance is being performed.
3057 * @imbalance: The variable to store the imbalance.
3058 */
3059static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3060                unsigned long *imbalance)
3061{
3062        unsigned long max_pull, load_above_capacity = ~0UL;
3063
3064        sds->busiest_load_per_task /= sds->busiest_nr_running;
3065        if (sds->group_imb) {
3066                sds->busiest_load_per_task =
3067                        min(sds->busiest_load_per_task, sds->avg_load);
3068        }
3069
3070        /*
3071         * In the presence of smp nice balancing, certain scenarios can have
3072         * max load less than avg load (as we skip the groups at or below
3073         * their cpu_power while calculating max_load...)
3074         */
3075        if (sds->max_load < sds->avg_load) {
3076                *imbalance = 0;
3077                return fix_small_imbalance(sds, this_cpu, imbalance);
3078        }
3079
3080        if (!sds->group_imb) {
3081                /*
3082                 * Don't want to pull so many tasks that a group would go idle.
3083                 */
3084                load_above_capacity = (sds->busiest_nr_running -
3085                                                sds->busiest_group_capacity);
3086
3087                load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
3088
3089                load_above_capacity /= sds->busiest->sgp->power;
3090        }
3091
3092        /*
3093         * We're trying to get all the cpus to the average_load, so we don't
3094         * want to push ourselves above the average load, nor do we wish to
3095         * reduce the max loaded cpu below the average load. At the same time,
3096         * we also don't want to reduce the group load below the group capacity
3097         * (so that we can implement power-savings policies etc). Thus we look
3098         * for the minimum possible imbalance.
3099         * Be careful of negative numbers as they'll appear as very large values
3100         * with unsigned longs.
3101         */
3102        max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
3103
3104        /* How much load to actually move to equalise the imbalance */
3105        *imbalance = min(max_pull * sds->busiest->sgp->power,
3106                (sds->avg_load - sds->this_load) * sds->this->sgp->power)
3107                        / SCHED_POWER_SCALE;
3108
3109        /*
3110         * if *imbalance is less than the average load per runnable task
3111         * there is no guarantee that any tasks will be moved so we'll have
3112         * a think about bumping its value to force at least one task to be
3113         * moved
3114         */
3115        if (*imbalance < sds->busiest_load_per_task)
3116                return fix_small_imbalance(sds, this_cpu, imbalance);
3117
3118}
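/*
 * End-to-end example (illustrative figures, both groups at power 1024):
 * with avg_load 1024, max_load 1536, this_load 512 and the busiest group
 * running 3 tasks on a capacity of 2, load_above_capacity = 1 * 1024 and
 * max_pull = min(1536 - 1024, 1024) = 512; the imbalance is then
 * min(512 * 1024, (1024 - 512) * 1024) / 1024 = 512. Since that is less
 * than a full nice-0 task's weight, fix_small_imbalance() then decides
 * whether to bump it so that at least one task actually moves.
 */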
3119
3120/******* find_busiest_group() helpers end here *********************/
3121
3122/**
3123 * find_busiest_group - Returns the busiest group within the sched_domain
3124 * if there is an imbalance. If there isn't an imbalance, and
3125 * the user has opted for power-savings, it returns a group whose
3126 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3127 * such a group exists.
3128 *
3129 * Also calculates the amount of weighted load which should be moved
3130 * to restore balance.
3131 *
3132 * @sd: The sched_domain whose busiest group is to be returned.
3133 * @this_cpu: The cpu for which load balancing is currently being performed.
3134 * @imbalance: Variable which stores amount of weighted load which should
3135 *              be moved to restore balance/put a group to idle.
3136 * @idle: The idle status of this_cpu.
3137 * @cpus: The set of CPUs under consideration for load-balancing.
3138 * @balance: Pointer to a variable indicating if this_cpu
3139 *      is the appropriate cpu to perform load balancing at this_level.
3140 *
3141 * Returns:     - the busiest group if imbalance exists.
3142 *              - If no imbalance and user has opted for power-savings balance,
3143 *                 return the least loaded group whose CPUs can be
3144 *                 put to idle by rebalancing its tasks onto our group.
3145 */
3146static struct sched_group *
3147find_busiest_group(struct sched_domain *sd, int this_cpu,
3148                   unsigned long *imbalance, enum cpu_idle_type idle,
3149                   const struct cpumask *cpus, int *balance)
3150{
3151        struct sd_lb_stats sds;
3152
3153        memset(&sds, 0, sizeof(sds));
3154
3155        /*
3156         * Compute the various statistics relevant for load balancing at
3157         * this level.
3158         */
3159        update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
3160
3161        /*
3162         * this_cpu is not the appropriate cpu to perform load balancing at
3163         * this level.
3164         */
3165        if (!(*balance))
3166                goto ret;
3167
3168        if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
3169            check_asym_packing(sd, &sds, this_cpu, imbalance))
3170                return sds.busiest;
3171
3172        /* There is no busy sibling group to pull tasks from */
3173        if (!sds.busiest || sds.busiest_nr_running == 0)
3174                goto out_balanced;
3175
3176        sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
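            /*
             * Sketch of the figure above (assuming SCHED_POWER_SCALE is
             * 1024): a domain with total_load 3072 spread over total_pwr
             * 2048 (two nominal cpus' worth of power) has
             * avg_load = 1024 * 3072 / 2048 = 1536.
             */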
3177
3178        /*
3179         * If the busiest group is imbalanced, the below checks don't
3180         * work because they assume all things are equal, which typically
3181         * isn't true due to cpus_allowed constraints and the like.
3182         */
3183        if (sds.group_imb)
3184                goto force_balance;
3185
3186        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
3187        if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
3188                        !sds.busiest_has_capacity)
3189                goto force_balance;
3190
3191        /*
3192         * If the local group is busier than the selected busiest group
3193         * don't try and pull any tasks.
3194         */
3195        if (sds.this_load >= sds.max_load)
3196                goto out_balanced;
3197
3198        /*
3199         * Don't pull any tasks if this group is already above the domain
3200         * average load.
3201         */
3202        if (sds.this_load >= sds.avg_load)
3203                goto out_balanced;
3204
3205        if (idle == CPU_IDLE) {
3206                /*
3207                 * This cpu is idle. If the busiest group doesn't have
3208                 * more tasks than the number of available CPUs and
3209                 * there is no imbalance between this and the busiest group
3210                 * w.r.t. idle CPUs, it is balanced.
3211                 */
3212                if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
3213                    sds.busiest_nr_running <= sds.busiest_group_weight)
3214                        goto out_balanced;
3215        } else {
3216                /*
3217                 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
3218                 * imbalance_pct to be conservative.
3219                 */
3220                if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3221                        goto out_balanced;
3222        }
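            /*
             * For the non-idle check above: with a typical imbalance_pct of
             * 125 (an assumption - the per-domain default varies), the
             * busiest group must carry more than 1.25x our own load before
             * balancing is considered worthwhile.
             */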
3223
3224force_balance:
3225        /* Looks like there is an imbalance. Compute it */
3226        calculate_imbalance(&sds, this_cpu, imbalance);
3227        return sds.busiest;
3228
3229out_balanced:
3230        /*
3231         * There is no obvious imbalance. But check if we can do some balancing
3232         * to save power.
3233         */
3234        if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3235                return sds.busiest;
3236ret:
3237        *imbalance = 0;
3238        return NULL;
3239}
3240
3241/*
3242 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3243 */
3244static struct rq *
3245find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
3246                   enum cpu_idle_type idle, unsigned long imbalance,
3247                   const struct cpumask *cpus)
3248{
3249        struct rq *busiest = NULL, *rq;
3250        unsigned long max_load = 0;
3251        int i;
3252
3253        for_each_cpu(i, sched_group_cpus(group)) {
3254                unsigned long power = power_of(i);
3255                unsigned long capacity = DIV_ROUND_CLOSEST(power,
3256                                                           SCHED_POWER_SCALE);
3257                unsigned long wl;
3258
3259                if (!capacity)
3260                        capacity = fix_small_capacity(sd, group);
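                    /*
                     * E.g. (assuming SCHED_POWER_SCALE is 1024) a cpu at full
                     * nominal power gets capacity 1, while a cpu whose power
                     * has dropped to, say, 400 rounds to 0 above and is fixed
                     * up by fix_small_capacity().
                     */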
3261
3262                if (!cpumask_test_cpu(i, cpus))
3263                        continue;
3264
3265                rq = cpu_rq(i);
3266                wl = weighted_cpuload(i);
3267
3268                /*
3269                 * When comparing with imbalance, use weighted_cpuload()
3270                 * which is not scaled with the cpu power.
3271                 */
3272                if (capacity && rq->nr_running == 1 && wl > imbalance)
3273                        continue;
3274
3275                /*
3276                 * For the load comparisons with the other cpu's, consider
3277                 * the weighted_cpuload() scaled with the cpu power, so that
3278                 * the load can be moved away from the cpu that is potentially
3279                 * running at a lower capacity.
3280                 */
3281                wl = (wl * SCHED_POWER_SCALE) / power;
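                    /*
                     * E.g. a weighted load of 512 on a cpu whose power has
                     * been reduced to 512 scales up to 1024 here (assuming
                     * SCHED_POWER_SCALE is 1024), making it a more attractive
                     * pull source than the same load on a full-power cpu.
                     */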
3282
3283                if (wl > max_load) {
3284                        max_load = wl;
3285                        busiest = rq;
3286                }
3287        }
3288
3289        return busiest;
3290}
3291
3292/*
3293 * Max backoff if we encounter pinned tasks. The exact value is fairly
3294 * arbitrary, so long as it is large enough.
3295 */
3296#define MAX_PINNED_INTERVAL     512
3297
3298/* Working cpumask for load_balance and load_balance_newidle. */
3299static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
3300
3301static int need_active_balance(struct sched_domain *sd, int idle,
3302                               int busiest_cpu, int this_cpu)
3303{
3304        if (idle == CPU_NEWLY_IDLE) {
3305
3306                /*
3307                 * ASYM_PACKING needs to force migrate tasks from busy but
3308                 * higher numbered CPUs in order to pack all tasks in the
3309                 * lowest numbered CPUs.
3310                 */
3311                if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
3312                        return 1;
3313
3314                /*
3315                 * The only task running on a non-idle cpu can be moved to this
3316                 * cpu in an attempt to completely free up the other CPU
3317                 * package.
3318                 *
3319                 * The package power saving logic comes from
3320                 * find_busiest_group(). If there is no imbalance, then
3321                 * f_b_g() will return NULL. However, when sched_mc={1,2},
3322                 * f_b_g() will select a group from which a running task may be
3323                 * pulled to this cpu in order to make the other package idle.
3324                 * If there is no opportunity to make a package idle and if
3325                 * there is no imbalance, then f_b_g() will return NULL and no
3326                 * action will be taken in load_balance_newidle().
3327                 *
3328                 * Under normal task pull operation due to imbalance, there
3329                 * will be more than one task in the source run queue and
3330                 * move_tasks() will succeed.  ld_moved will be true and this
3331                 * active balance code will not be triggered.
3332                 */
3333                if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
3334                        return 0;
3335        }
3336
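            /*
             * Outside the CPU_NEWLY_IDLE special cases above, only resort to
             * active balancing after repeated failures: e.g. with a
             * cache_nice_tries of 1 (an assumption - the value is per-domain)
             * the fourth consecutive failed periodic balance triggers an
             * active push.
             */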
3337        return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
3338}
3339
3340static int active_load_balance_cpu_stop(void *data);
3341
3342/*
3343 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3344 * tasks if there is an imbalance.
3345 */
3346static int load_balance(int this_cpu, struct rq *this_rq,
3347                        struct sched_domain *sd, enum cpu_idle_type idle,
3348                        int *balance)
3349{
3350        int ld_moved, all_pinned = 0, active_balance = 0;
3351        struct sched_group *group;
3352        unsigned long imbalance;
3353        struct rq *busiest;
3354        unsigned long flags;
3355        struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
3356
3357        cpumask_copy(cpus, cpu_active_mask);
3358
3359        schedstat_inc(sd, lb_count[idle]);
3360
3361redo:
3362        group = find_busiest_group(sd, this_cpu, &imbalance, idle,
3363                                   cpus, balance);
3364
3365        if (*balance == 0)
3366                goto out_balanced;
3367
3368        if (!group) {
3369                schedstat_inc(sd, lb_nobusyg[idle]);
3370                goto out_balanced;
3371        }
3372
3373        busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
3374        if (!busiest) {
3375                schedstat_inc(sd, lb_nobusyq[idle]);
3376                goto out_balanced;
3377        }
3378
3379        BUG_ON(busiest == this_rq);
3380
3381        schedstat_add(sd, lb_imbalance[idle], imbalance);
3382
3383        ld_moved = 0;
3384        if (busiest->nr_running > 1) {
3385                /*
3386                 * Attempt to move tasks. If find_busiest_group has found
3387                 * an imbalance but busiest->nr_running <= 1, the group is
3388                 * still unbalanced. ld_moved simply stays zero, so it is
3389                 * correctly treated as an imbalance.
3390                 */
3391                all_pinned = 1;
3392                local_irq_save(flags);
3393                double_rq_lock(this_rq, busiest);
3394                ld_moved = move_tasks(this_rq, this_cpu, busiest,
3395                                      imbalance, sd, idle, &all_pinned);
3396                double_rq_unlock(this_rq, busiest);
3397                local_irq_restore(flags);
3398
3399                /*
3400                 * some other cpu did the load balance for us.
3401                 */
3402                if (ld_moved && this_cpu != smp_processor_id())
3403                        resched_cpu(this_cpu);
3404
3405                /* All tasks on this runqueue were pinned by CPU affinity */
3406                if (unlikely(all_pinned)) {
3407                        cpumask_clear_cpu(cpu_of(busiest), cpus);
3408                        if (!cpumask_empty(cpus))
3409                                goto redo;
3410                        goto out_balanced;
3411                }
3412        }
3413
3414        if (!ld_moved) {
3415                schedstat_inc(sd, lb_failed[idle]);
3416                /*
3417                 * Increment the failure counter only on periodic balance.
3418                 * We do not want newidle balance, which can be very
3419                 * frequent, pollute the failure counter causing
3420                 * excessive cache_hot migrations and active balances.
3421                 */
3422                if (idle != CPU_NEWLY_IDLE)
3423                        sd->nr_balance_failed++;
3424
3425                if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
3426                        raw_spin_lock_irqsave(&busiest->lock, flags);
3427
3428                        /* don't kick the active_load_balance_cpu_stop
3429                         * if the curr task on the busiest cpu can't be
3430                         * moved to this_cpu
3431                         */
3432                        if (!cpumask_test_cpu(this_cpu,
3433                                              &busiest->curr->cpus_allowed)) {
3434                                raw_spin_unlock_irqrestore(&busiest->lock,
3435                                                            flags);
3436                                all_pinned = 1;
3437                                goto out_one_pinned;
3438                        }
3439
3440                        /*
3441                         * ->active_balance synchronizes accesses to
3442                         * ->active_balance_work.  Once set, it's cleared
3443                         * only after active load balance is finished.
3444                         */
3445                        if (!busiest->active_balance) {
3446                                busiest->active_balance = 1;
3447                                busiest->push_cpu = this_cpu;
3448                                active_balance = 1;
3449                        }
3450                        raw_spin_unlock_irqrestore(&busiest->lock, flags);
3451
3452                        if (active_balance)
3453                                stop_one_cpu_nowait(cpu_of(busiest),
3454                                        active_load_balance_cpu_stop, busiest,
3455                                        &busiest->active_balance_work);
3456
3457                        /*
3458                         * We've kicked active balancing, reset the failure
3459                         * counter.
3460                         */
3461                        sd->nr_balance_failed = sd->cache_nice_tries+1;
3462                }
3463        } else
3464                sd->nr_balance_failed = 0;
3465
3466        if (likely(!active_balance)) {
3467                /* We were unbalanced, so reset the balancing interval */
3468                sd->balance_interval = sd->min_interval;
3469        } else {
3470                /*
3471                 * If we've begun active balancing, start to back off. This
3472                 * case may not be covered by the all_pinned logic if there
3473                 * is only 1 task on the busy runqueue (because we don't call
3474                 * move_tasks).
3475                 */
3476                if (sd->balance_interval < sd->max_interval)
3477                        sd->balance_interval *= 2;
3478        }
3479
3480        goto out;
3481
3482out_balanced:
3483        schedstat_inc(sd, lb_balanced[idle]);
3484
3485        sd->nr_balance_failed = 0;
3486
3487out_one_pinned:
3488        /* tune up the balancing interval */
3489        if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3490                        (sd->balance_interval < sd->max_interval))
3491                sd->balance_interval *= 2;
3492
3493        ld_moved = 0;
3494out:
3495        return ld_moved;
3496}
3497
3498/*
3499 * idle_balance is called by schedule() if this_cpu is about to become
3500 * idle. Attempts to pull tasks from other CPUs.
3501 */
3502static void idle_balance(int this_cpu, struct rq *this_rq)
3503{
3504        struct sched_domain *sd;
3505        int pulled_task = 0;
3506        unsigned long next_balance = jiffies + HZ;
3507
3508        this_rq->idle_stamp = this_rq->clock;
3509
3510        if (this_rq->avg_idle < sysctl_sched_migration_cost)
3511                return;
3512
3513        /*
3514         * Drop the rq->lock, but keep IRQ/preempt disabled.
3515         */
3516        raw_spin_unlock(&this_rq->lock);
3517
3518        update_shares(this_cpu);
3519        rcu_read_lock();
3520        for_each_domain(this_cpu, sd) {
3521                unsigned long interval;
3522                int balance = 1;
3523
3524                if (!(sd->flags & SD_LOAD_BALANCE))
3525                        continue;
3526
3527                if (sd->flags & SD_BALANCE_NEWIDLE) {
3528                        /* If we've pulled tasks over, stop searching: */
3529                        pulled_task = load_balance(this_cpu, this_rq,
3530                                                   sd, CPU_NEWLY_IDLE, &balance);
3531                }
3532
3533                interval = msecs_to_jiffies(sd->balance_interval);
3534                if (time_after(next_balance, sd->last_balance + interval))
3535                        next_balance = sd->last_balance + interval;
3536                if (pulled_task) {
3537                        this_rq->idle_stamp = 0;
3538                        break;
3539                }
3540        }
3541        rcu_read_unlock();
3542
3543        raw_spin_lock(&this_rq->lock);
3544
3545        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3546                /*
3547                 * We are going idle. next_balance may be set based on
3548                 * a busy processor. So reset next_balance.
3549                 */
3550                this_rq->next_balance = next_balance;
3551        }
3552}
3553
3554/*
3555 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
3556 * running tasks off the busiest CPU onto idle CPUs. It requires at
3557 * least 1 task to be running on each physical CPU where possible, and
3558 * avoids physical / logical imbalances.
3559 */
3560static int active_load_balance_cpu_stop(void *data)
3561{
3562        struct rq *busiest_rq = data;
3563        int busiest_cpu = cpu_of(busiest_rq);
3564        int target_cpu = busiest_rq->push_cpu;
3565        struct rq *target_rq = cpu_rq(target_cpu);
3566        struct sched_domain *sd;
3567
3568        raw_spin_lock_irq(&busiest_rq->lock);
3569
3570        /* make sure the requested cpu hasn't gone down in the meantime */
3571        if (unlikely(busiest_cpu != smp_processor_id() ||
3572                     !busiest_rq->active_balance))
3573                goto out_unlock;
3574
3575        /* Is there any task to move? */
3576        if (busiest_rq->nr_running <= 1)
3577                goto out_unlock;
3578
3579        /*
3580         * This condition is "impossible"; if it occurs
3581         * we need to fix it. Originally reported by
3582         * Bjorn Helgaas on a 128-cpu setup.
3583         */
3584        BUG_ON(busiest_rq == target_rq);
3585
3586        /* move a task from busiest_rq to target_rq */
3587        double_lock_balance(busiest_rq, target_rq);
3588
3589        /* Search for an sd spanning us and the target CPU. */
3590        rcu_read_lock();
3591        for_each_domain(target_cpu, sd) {
3592                if ((sd->flags & SD_LOAD_BALANCE) &&
3593                    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3594                                break;
3595        }
3596
3597        if (likely(sd)) {
3598                schedstat_inc(sd, alb_count);
3599
3600                if (move_one_task(target_rq, target_cpu, busiest_rq,
3601                                  sd, CPU_IDLE))
3602                        schedstat_inc(sd, alb_pushed);
3603                else
3604                        schedstat_inc(sd, alb_failed);
3605        }
3606        rcu_read_unlock();
3607        double_unlock_balance(busiest_rq, target_rq);
3608out_unlock:
3609        busiest_rq->active_balance = 0;
3610        raw_spin_unlock_irq(&busiest_rq->lock);
3611        return 0;
3612}
3613
3614#ifdef CONFIG_NO_HZ
3615
3616static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
3617
3618static void trigger_sched_softirq(void *data)
3619{
3620        raise_softirq_irqoff(SCHED_SOFTIRQ);
3621}
3622
3623static inline void init_sched_softirq_csd(struct call_single_data *csd)
3624{
3625        csd->func = trigger_sched_softirq;
3626        csd->info = NULL;
3627        csd->flags = 0;
3628        csd->priv = 0;
3629}
3630
3631/*
3632 * idle load balancing details
3633 * - One of the idle CPUs nominates itself as idle load_balancer, while
3634 *   entering idle.
3635 * - This idle load balancer CPU will also go into tickless mode when
3636 *   it is idle, just like all other idle CPUs
3637 * - When one of the busy CPUs notices that idle rebalancing may be
3638 *   needed, it will kick the idle load balancer, which then does idle
3639 *   load balancing for all the idle CPUs.
3640 */
3641static struct {
3642        atomic_t load_balancer;
3643        atomic_t first_pick_cpu;
3644        atomic_t second_pick_cpu;
3645        cpumask_var_t idle_cpus_mask;
3646        cpumask_var_t grp_idle_mask;
3647        unsigned long next_balance;     /* in jiffy units */
3648} nohz ____cacheline_aligned;
3649
3650int get_nohz_load_balancer(void)
3651{
3652        return atomic_read(&nohz.load_balancer);
3653}
3654
3655#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3656/**
3657 * lowest_flag_domain - Return lowest sched_domain containing flag.
3658 * @cpu:        The cpu whose lowest level of sched domain is to
3659 *              be returned.
3660 * @flag:       The flag to check for the lowest sched_domain
3661 *              for the given cpu.
3662 *
3663 * Returns the lowest sched_domain of a cpu which contains the given flag.
3664 */
3665static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3666{
3667        struct sched_domain *sd;
3668
3669        for_each_domain(cpu, sd)
3670                if (sd && (sd->flags & flag))
3671                        break;
3672
3673        return sd;
3674}
3675
3676/**
3677 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3678 * @cpu:        The cpu whose domains we're iterating over.
3679 * @sd:         variable holding the value of the power_savings_sd
3680 *              for cpu.
3681 * @flag:       The flag to filter the sched_domains to be iterated.
3682 *
3683 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3684 * set, starting from the lowest sched_domain to the highest.
3685 */
3686#define for_each_flag_domain(cpu, sd, flag) \
3687        for (sd = lowest_flag_domain(cpu, flag); \
3688                (sd && (sd->flags & flag)); sd = sd->parent)
3689
3690/**
3691 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3692 * @ilb_group:  group to be checked for semi-idleness
3693 *
3694 * Returns:     1 if the group is semi-idle. 0 otherwise.
3695 *
3696 * We define a sched_group to be semi-idle if it has at least one idle CPU
3697 * and at least one non-idle CPU. This helper function checks if the given
3698 * sched_group is semi-idle or not.
3699 */
3700static inline int is_semi_idle_group(struct sched_group *ilb_group)
3701{
3702        cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
3703                                        sched_group_cpus(ilb_group));
3704
3705        /*
3706         * A sched_group is semi-idle when it has at least one busy cpu
3707         * and at least one idle cpu.
3708         */
3709        if (cpumask_empty(nohz.grp_idle_mask))
3710                return 0;
3711
3712        if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
3713                return 0;
3714
3715        return 1;
3716}
3717/**
3718 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3719 * @cpu:        The cpu which is nominating a new idle_load_balancer.
3720 *
3721 * Returns:     Returns the id of the idle load balancer if it exists,
3722 *              Else, returns >= nr_cpu_ids.
3723 *
3724 * This algorithm picks the idle load balancer such that it belongs to a
3725 * semi-idle powersavings sched_domain. The idea is to try and avoid
3726 * completely idle packages/cores just for the purpose of idle load balancing
3727 * when there are other idle CPUs that are better suited for that job.
3728 */
3729static int find_new_ilb(int cpu)
3730{
3731        struct sched_domain *sd;
3732        struct sched_group *ilb_group;
3733        int ilb = nr_cpu_ids;
3734
3735        /*
3736         * Have idle load balancer selection from semi-idle packages only
3737         * when power-aware load balancing is enabled
3738         */
3739        if (!(sched_smt_power_savings || sched_mc_power_savings))
3740                goto out_done;
3741
3742        /*
3743         * Optimize for the case when we have no idle CPUs or only one
3744         * idle CPU. Don't walk the sched_domain hierarchy in such cases
3745         */
3746        if (cpumask_weight(nohz.idle_cpus_mask) < 2)
3747                goto out_done;
3748
3749        rcu_read_lock();
3750        for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3751                ilb_group = sd->groups;
3752
3753                do {
3754                        if (is_semi_idle_group(ilb_group)) {
3755                                ilb = cpumask_first(nohz.grp_idle_mask);
3756                                goto unlock;
3757                        }
3758
3759                        ilb_group = ilb_group->next;
3760
3761                } while (ilb_group != sd->groups);
3762        }
3763unlock:
3764        rcu_read_unlock();
3765
3766out_done:
3767        return ilb;
3768}
3769#else /*  (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3770static inline int find_new_ilb(int call_cpu)
3771{
3772        return nr_cpu_ids;
3773}
3774#endif
3775
3776/*
3777 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
3778 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
3779 * CPU (if there is one).
3780 */
3781static void nohz_balancer_kick(int cpu)
3782{
3783        int ilb_cpu;
3784
3785        nohz.next_balance++;
3786
3787        ilb_cpu = get_nohz_load_balancer();
3788
3789        if (ilb_cpu >= nr_cpu_ids) {
3790                ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
3791                if (ilb_cpu >= nr_cpu_ids)
3792                        return;
3793        }
3794
3795        if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
3796                struct call_single_data *cp;
3797
3798                cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
3799                cp = &per_cpu(remote_sched_softirq_cb, cpu);
3800                __smp_call_function_single(ilb_cpu, cp, 0);
3801        }
3802        return;
3803}
3804
3805/*
3806 * This routine will try to nominate the ilb (idle load balancing)
3807 * owner among the cpus whose ticks are stopped. The ilb owner will do the idle
3808 * load balancing on behalf of all those cpus.
3809 *
3810 * When the ilb owner becomes busy, we will not have new ilb owner until some
3811 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
3812 * idle load balancing by kicking one of the idle CPUs.
3813 *
3814 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
3815 * ilb owner CPU in the future (when there is a need for idle load balancing on
3816 * behalf of all idle CPUs).
3817 */
3818void select_nohz_load_balancer(int stop_tick)
3819{
3820        int cpu = smp_processor_id();
3821
3822        if (stop_tick) {
3823                if (!cpu_active(cpu)) {
3824                        if (atomic_read(&nohz.load_balancer) != cpu)
3825                                return;
3826
3827                        /*
3828                         * If we are going offline and still the leader,
3829                         * give up!
3830                         */
3831                        if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3832                                           nr_cpu_ids) != cpu)
3833                                BUG();
3834
3835                        return;
3836                }
3837
3838                cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
3839
3840                if (atomic_read(&nohz.first_pick_cpu) == cpu)
3841                        atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
3842                if (atomic_read(&nohz.second_pick_cpu) == cpu)
3843                        atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
3844
3845                if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
3846                        int new_ilb;
3847
3848                        /* make me the ilb owner */
3849                        if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
3850                                           cpu) != nr_cpu_ids)
3851                                return;
3852
3853                        /*
3854                         * Check to see if there is a more power-efficient
3855                         * ilb.
3856                         */
3857                        new_ilb = find_new_ilb(cpu);
3858                        if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
3859                                atomic_set(&nohz.load_balancer, nr_cpu_ids);
3860                                resched_cpu(new_ilb);
3861                                return;
3862                        }
3863                        return;
3864                }
3865        } else {
3866                if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
3867                        return;
3868
3869                cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
3870
3871                if (atomic_read(&nohz.load_balancer) == cpu)
3872                        if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3873                                           nr_cpu_ids) != cpu)
3874                                BUG();
3875        }
3876        return;
3877}
3878#endif
3879
3880static DEFINE_SPINLOCK(balancing);
3881
3882static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3883
3884/*
3885 * Scale the max load_balance interval with the number of CPUs in the system.
3886 * This trades load-balance latency on larger machines for less cross talk.
3887 */
3888static void update_max_interval(void)
3889{
3890        max_load_balance_interval = HZ*num_online_cpus()/10;
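            /*
             * E.g. with HZ at 1000 (an assumption; configs differ), a 16-cpu
             * machine caps balance intervals at 1600 jiffies, i.e. 1.6s.
             */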
3891}
3892
3893/*
3894 * It checks each scheduling domain to see if it is due to be balanced,
3895 * and initiates a balancing operation if so.
3896 *
3897 * Balancing parameters are set up in arch_init_sched_domains.
3898 */
3899static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3900{
3901        int balance = 1;
3902        struct rq *rq = cpu_rq(cpu);
3903        unsigned long interval;
3904        struct sched_domain *sd;
3905        /* Earliest time when we have to do rebalance again */
3906        unsigned long next_balance = jiffies + 60*HZ;
3907        int update_next_balance = 0;
3908        int need_serialize;
3909
3910        update_shares(cpu);
3911
3912        rcu_read_lock();
3913        for_each_domain(cpu, sd) {
3914                if (!(sd->flags & SD_LOAD_BALANCE))
3915                        continue;
3916
3917                interval = sd->balance_interval;
3918                if (idle != CPU_IDLE)
3919                        interval *= sd->busy_factor;
3920
3921                /* scale ms to jiffies */
3922                interval = msecs_to_jiffies(interval);
3923                interval = clamp(interval, 1UL, max_load_balance_interval);
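                    /*
                     * E.g. a base balance_interval of 8ms with a busy_factor
                     * of, say, 32 (both assumptions - the defaults are
                     * per-domain) stretches to 256ms on a busy cpu before the
                     * clamp above takes effect.
                     */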
3924
3925                need_serialize = sd->flags & SD_SERIALIZE;
3926
3927                if (need_serialize) {
3928                        if (!spin_trylock(&balancing))
3929                                goto out;
3930                }
3931
3932                if (time_after_eq(jiffies, sd->last_balance + interval)) {
3933                        if (load_balance(cpu, rq, sd, idle, &balance)) {
3934                                /*
3935                                 * We've pulled tasks over, so we're no
3936                                 * longer idle.
3937                                 */
3938                                idle = CPU_NOT_IDLE;
3939                        }
3940                        sd->last_balance = jiffies;
3941                }
3942                if (need_serialize)
3943                        spin_unlock(&balancing);
3944out:
3945                if (time_after(next_balance, sd->last_balance + interval)) {
3946                        next_balance = sd->last_balance + interval;
3947                        update_next_balance = 1;
3948                }
3949
3950                /*
3951                 * Stop the load balance at this level. There is another
3952                 * CPU in our sched group which is doing load balancing more
3953                 * actively.
3954                 */
3955                if (!balance)
3956                        break;
3957        }
3958        rcu_read_unlock();
3959
3960        /*
3961         * next_balance will be updated only when there is a need.
3962         * When the cpu is attached to null domain for ex, it will not be
3963         * updated.
3964         */
3965        if (likely(update_next_balance))
3966                rq->next_balance = next_balance;
3967}
3968
3969#ifdef CONFIG_NO_HZ
3970/*
3971 * In the CONFIG_NO_HZ case, the idle balance kickee will do the
3972 * rebalancing for all the cpus whose scheduler ticks are stopped.
3973 */
3974static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
3975{
3976        struct rq *this_rq = cpu_rq(this_cpu);
3977        struct rq *rq;
3978        int balance_cpu;
3979
3980        if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
3981                return;
3982
3983        for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
3984                if (balance_cpu == this_cpu)
3985                        continue;
3986
3987                /*
3988                 * If this cpu gets work to do, stop the load balancing
3989                 * work being done for other cpus. The next load
3990                 * balancing owner will pick it up.
3991                 */
3992                if (need_resched()) {
3993                        this_rq->nohz_balance_kick = 0;
3994                        break;
3995                }
3996
3997                raw_spin_lock_irq(&this_rq->lock);
3998                update_rq_clock(this_rq);
3999                update_cpu_load(this_rq);
4000                raw_spin_unlock_irq(&this_rq->lock);
4001
4002                rebalance_domains(balance_cpu, CPU_IDLE);
4003
4004                rq = cpu_rq(balance_cpu);
4005                if (time_after(this_rq->next_balance, rq->next_balance))
4006                        this_rq->next_balance = rq->next_balance;
4007        }
4008        nohz.next_balance = this_rq->next_balance;
4009        this_rq->nohz_balance_kick = 0;
4010}
4011
4012/*
4013 * Current heuristic for kicking the idle load balancer
4014 * - first_pick_cpu is one of the busy CPUs. It will kick
4015 *   idle load balancer when it has more than one process active. This
4016 *   eliminates the need for idle load balancing altogether when we have
4017 *   only one running process in the system (common case).
4018 * - If there is more than one busy CPU, the idle load balancer may have
4019 *   to run for active_load_balance to happen (i.e., two busy CPUs are
4020 *   SMT or core siblings and can run better if they move to different
4021 *   physical CPUs). So, second_pick_cpu is the second of the busy CPUs
4022 *   which will kick idle load balancer as soon as it has any load.
4023 */
4024static inline int nohz_kick_needed(struct rq *rq, int cpu)
4025{
4026        unsigned long now = jiffies;
4027        int ret;
4028        int first_pick_cpu, second_pick_cpu;
4029
4030        if (time_before(now, nohz.next_balance))
4031                return 0;
4032
4033        if (rq->idle_at_tick)
4034                return 0;
4035
4036        first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
4037        second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
4038
4039        if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
4040            second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
4041                return 0;
4042
4043        ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
4044        if (ret == nr_cpu_ids || ret == cpu) {
4045                atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
4046                if (rq->nr_running > 1)
4047                        return 1;
4048        } else {
4049                ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
4050                if (ret == nr_cpu_ids || ret == cpu) {
4051                        if (rq->nr_running)
4052                                return 1;
4053                }
4054        }
4055        return 0;
4056}
4057#else
4058static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
4059#endif
4060
4061/*
4062 * run_rebalance_domains is triggered when needed from the scheduler tick.
4063 * Also triggered for nohz idle balancing (with nohz_balance_kick set).
4064 */
4065static void run_rebalance_domains(struct softirq_action *h)
4066{
4067        int this_cpu = smp_processor_id();
4068        struct rq *this_rq = cpu_rq(this_cpu);
4069        enum cpu_idle_type idle = this_rq->idle_at_tick ?
4070                                                CPU_IDLE : CPU_NOT_IDLE;
4071
4072        rebalance_domains(this_cpu, idle);
4073
4074        /*
4075         * If this cpu has a pending nohz_balance_kick, then do the
4076         * balancing on behalf of the other idle cpus whose ticks are
4077         * stopped.
4078         */
4079        nohz_idle_balance(this_cpu, idle);
4080}
4081
4082static inline int on_null_domain(int cpu)
4083{
4084        return !rcu_dereference_sched(cpu_rq(cpu)->sd);
4085}
4086
4087/*
4088 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4089 */
4090static inline void trigger_load_balance(struct rq *rq, int cpu)
4091{
4092        /* Don't need to rebalance while attached to NULL domain */
4093        if (time_after_eq(jiffies, rq->next_balance) &&
4094            likely(!on_null_domain(cpu)))
4095                raise_softirq(SCHED_SOFTIRQ);
4096#ifdef CONFIG_NO_HZ
4097        else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
4098                nohz_balancer_kick(cpu);
4099#endif
4100}
4101
4102static void rq_online_fair(struct rq *rq)
4103{
4104        update_sysctl();
4105}
4106
4107static void rq_offline_fair(struct rq *rq)
4108{
4109        update_sysctl();
4110}
4111
4112#else   /* CONFIG_SMP */
4113
4114/*
4115 * on UP we do not need to balance between CPUs:
4116 */
4117static inline void idle_balance(int cpu, struct rq *rq)
4118{
4119}
4120
4121#endif /* CONFIG_SMP */
4122
4123/*
4124 * scheduler tick hitting a task of our scheduling class:
4125 */
4126static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
4127{
4128        struct cfs_rq *cfs_rq;
4129        struct sched_entity *se = &curr->se;
4130
4131        for_each_sched_entity(se) {
4132                cfs_rq = cfs_rq_of(se);
4133                entity_tick(cfs_rq, se, queued);
4134        }
4135}
4136
4137/*
4138 * called on fork with the child task as argument from the parent's context
4139 *  - child not yet on the tasklist
4140 *  - preemption disabled
4141 */
4142static void task_fork_fair(struct task_struct *p)
4143{
4144        struct cfs_rq *cfs_rq = task_cfs_rq(current);
4145        struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
4146        int this_cpu = smp_processor_id();
4147        struct rq *rq = this_rq();
4148        unsigned long flags;
4149
4150        raw_spin_lock_irqsave(&rq->lock, flags);
4151
4152        update_rq_clock(rq);
4153
4154        if (unlikely(task_cpu(p) != this_cpu)) {
4155                rcu_read_lock();
4156                __set_task_cpu(p, this_cpu);
4157                rcu_read_unlock();
4158        }
4159
4160        update_curr(cfs_rq);
4161
4162        if (curr)
4163                se->vruntime = curr->vruntime;
4164        place_entity(cfs_rq, se, 1);
4165
4166        if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
4167                /*
4168                 * Upon rescheduling, sched_class::put_prev_task() will place
4169                 * 'current' within the tree based on its new key value.
4170                 */
4171                swap(curr->vruntime, se->vruntime);
4172                resched_task(rq->curr);
4173        }
4174
4175        se->vruntime -= cfs_rq->min_vruntime;
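            /*
             * se->vruntime is now relative; enqueue_entity() is expected to
             * add the destination cfs_rq's min_vruntime back in when the
             * child is finally queued (possibly on another cpu).
             */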
4176
4177        raw_spin_unlock_irqrestore(&rq->lock, flags);
4178}
4179
4180/*
4181 * Priority of the task has changed. Check to see if we preempt
4182 * the current task.
4183 */
4184static void
4185prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
4186{
4187        if (!p->se.on_rq)
4188                return;
4189
4190        /*
4191         * Reschedule if we are currently running on this runqueue and
4192         * our priority decreased, or if we are not currently running on
4193         * this runqueue and our priority is higher than the current's
4194         */
4195        if (rq->curr == p) {
4196                if (p->prio > oldprio)
4197                        resched_task(rq->curr);
4198        } else
4199                check_preempt_curr(rq, p, 0);
4200}
4201
4202static void switched_from_fair(struct rq *rq, struct task_struct *p)
4203{
4204        struct sched_entity *se = &p->se;
4205        struct cfs_rq *cfs_rq = cfs_rq_of(se);
4206
4207        /*
4208         * Ensure the task's vruntime is normalized, so that when it's
4209         * switched back to the fair class the enqueue_entity(.flags=0) will
4210         * do the right thing.
4211         *
4212         * If it was on_rq, then the dequeue_entity(.flags=0) will already
4213         * have normalized the vruntime, if it was !on_rq, then only when
4214         * the task is sleeping will it still have non-normalized vruntime.
4215         */
4216        if (!se->on_rq && p->state != TASK_RUNNING) {
4217                /*
4218                 * Fix up our vruntime so that the current sleep doesn't
4219                 * cause 'unlimited' sleep bonus.
4220                 */
4221                place_entity(cfs_rq, se, 0);
4222                se->vruntime -= cfs_rq->min_vruntime;
4223        }
4224}
4225
4226/*
4227 * We switched to the sched_fair class.
4228 */
4229static void switched_to_fair(struct rq *rq, struct task_struct *p)
4230{
4231        if (!p->se.on_rq)
4232                return;
4233
4234        /*
4235         * We were most likely switched from sched_rt, so
4236         * kick off the schedule if running, otherwise just see
4237         * if we can still preempt the current task.
4238         */
4239        if (rq->curr == p)
4240                resched_task(rq->curr);
4241        else
4242                check_preempt_curr(rq, p, 0);
4243}
4244
4245/* Account for a task changing its policy or group.
4246 *
4247 * This routine is mostly called to set cfs_rq->curr field when a task
4248 * migrates between groups/classes.
4249 */
4250static void set_curr_task_fair(struct rq *rq)
4251{
4252        struct sched_entity *se = &rq->curr->se;
4253
4254        for_each_sched_entity(se)
4255                set_next_entity(cfs_rq_of(se), se);
4256}
4257
4258#ifdef CONFIG_FAIR_GROUP_SCHED
4259static void task_move_group_fair(struct task_struct *p, int on_rq)
4260{
4261        /*
4262         * If the task was not on the rq at the time of this cgroup movement
4263         * it must have been asleep; sleeping tasks keep their ->vruntime
4264         * absolute on their old rq until wakeup (needed for the fair sleeper
4265         * bonus in place_entity()).
4266         *
4267         * If it was on the rq, we've just 'preempted' it, which does convert
4268         * ->vruntime to a relative base.
4269         *
4270         * Make sure both cases convert their relative position when migrating
4271         * to another cgroup's rq. This does somewhat interfere with the
4272         * fair sleeper stuff for the first placement, but who cares.
4273         */
4274        if (!on_rq)
4275                p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
4276        set_task_rq(p, task_cpu(p));
4277        if (!on_rq)
4278                p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
4279}
4280#endif
4281
4282static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
4283{
4284        struct sched_entity *se = &task->se;
4285        unsigned int rr_interval = 0;
4286
4287        /*
4288         * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
4289         * idle runqueue:
4290         */
4291        if (rq->cfs.load.weight)
4292                rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
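                    /*
                     * E.g. a 6ms sched_slice() reports as 6 jiffies with HZ
                     * at 1000, or 1 jiffy with HZ at 250 (the HZ values are
                     * assumptions; they depend on the config).
                     */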
4293
4294        return rr_interval;
4295}
4296
4297/*
4298 * All the scheduling class methods:
4299 */
4300static const struct sched_class fair_sched_class = {
4301        .next                   = &idle_sched_class,
4302        .enqueue_task           = enqueue_task_fair,
4303        .dequeue_task           = dequeue_task_fair,
4304        .yield_task             = yield_task_fair,
4305        .yield_to_task          = yield_to_task_fair,
4306
4307        .check_preempt_curr     = check_preempt_wakeup,
4308
4309        .pick_next_task         = pick_next_task_fair,
4310        .put_prev_task          = put_prev_task_fair,
4311
4312#ifdef CONFIG_SMP
4313        .select_task_rq         = select_task_rq_fair,
4314
4315        .rq_online              = rq_online_fair,
4316        .rq_offline             = rq_offline_fair,
4317
4318        .task_waking            = task_waking_fair,
4319#endif
4320
4321        .set_curr_task          = set_curr_task_fair,
4322        .task_tick              = task_tick_fair,
4323        .task_fork              = task_fork_fair,
4324
4325        .prio_changed           = prio_changed_fair,
4326        .switched_from          = switched_from_fair,
4327        .switched_to            = switched_to_fair,
4328
4329        .get_rr_interval        = get_rr_interval_fair,
4330
4331#ifdef CONFIG_FAIR_GROUP_SCHED
4332        .task_move_group        = task_move_group_fair,
4333#endif
4334};
4335
4336#ifdef CONFIG_SCHED_DEBUG
4337static void print_cfs_stats(struct seq_file *m, int cpu)
4338{
4339        struct cfs_rq *cfs_rq;
4340
4341        rcu_read_lock();
4342        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
4343                print_cfs_rq(m, cpu, cfs_rq);
4344        rcu_read_unlock();
4345}
4346#endif
4347