linux/kernel/sched/rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}
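
/*
 * Usage sketch (hedged; the assumed caller is sched_init() in
 * kernel/sched/core.c): the global bandwidth pool is initialized from
 * the sched_rt_period_us/sched_rt_runtime_us sysctls, converted to
 * nanoseconds, which default to a 1s period with 950ms of runtime:
 *
 *        init_rt_bandwidth(&def_rt_bandwidth,
 *                          global_rt_period(), global_rt_runtime());
 */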

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
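
/*
 * Why bit MAX_RT_PRIO is set above, as a sketch: the bitmap has
 * MAX_RT_PRIO+1 bits, so when every real priority bit is clear the
 * search stops at the delimiter instead of running off the end, and
 * callers detect the empty case as:
 *
 *        idx = sched_find_first_bit(array->bitmap);
 *        if (idx >= MAX_RT_PRIO)
 *                ... queue is empty, only the delimiter was found ...
 */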

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq, cpu_rq(i));
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}
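
/*
 * Note on the allocations above: rt_rq and rt_se are pointers, so
 * sizeof(rt_rq) and sizeof(rt_se) deliberately size per-CPU arrays of
 * *pointers*; the structures themselves are allocated node-local in
 * the loop.  A hypothetical overflow-safe spelling of the first
 * allocation would be:
 *
 *        tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(*tg->rt_rq), GFP_KERNEL);
 */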

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}
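
/*
 * Reader-side sketch of the ordering above (hypothetical, for
 * illustration only): rt_overloaded() reads rto_count as a hint
 * before pull_rt_task() walks rto_mask, so a read barrier would pair
 * with the wmb() if the reader were not otherwise serialized:
 *
 *        if (atomic_read(&rq->rd->rto_count)) {
 *                smp_rmb();        // assumed pairing barrier
 *                for_each_cpu(cpu, rq->rd->rto_mask)
 *                        ...;
 *        }
 */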

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}
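
/*
 * Usage sketch: pushable_tasks is a plist kept sorted by ->prio, so
 * the first entry is always the highest-priority pushable task; this
 * is what pick_next_pushable_task() below relies on:
 *
 *        p = plist_first_entry(&rq->rt.pushable_tasks,
 *                              struct task_struct, pushable_tasks);
 */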

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);    \
                (iter = next_task_group(iter)) &&                       \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)
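
/*
 * Example use of the iterator above (this is how __disable_runtime()
 * and __enable_runtime() below walk every rt_rq hanging off a rq,
 * one per task group):
 *
 *        rt_rq_iter_t iter;
 *        struct rt_rq *rt_rq;
 *
 *        for_each_rt_rq(rt_rq, iter, rq)
 *                ...;
 */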

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_add_rcu(&rt_rq->leaf_rt_rq_list,
                        &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
        list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}
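
/*
 * Worked example with made-up numbers: on a 4-CPU root domain
 * (weight == 4), a neighbour with 40ms of spare runtime
 * (iter->rt_runtime - iter->rt_time == 40ms) donates
 * 40ms / 4 == 10ms, and the transfer is clamped so that our
 * rt_runtime never exceeds rt_period.
 */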

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have, that's the amount of runtime
                 * we lent out and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void disable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void enable_runtime(struct rq *rq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int cpu = (int)(long)hcpu;

        switch (action) {
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                disable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                enable_runtime(cpu_rq(cpu));
                return NOTIFY_OK;

        default:
                return NOTIFY_DONE;
        }
}

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (!sched_feat(RT_RUNTIME_SHARE))
                return more;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled.  If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway.  Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * Force a clock update if the CPU was idle,
                                 * lest wakeup -> unthrottle time accumulate.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}
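
/*
 * Replenishment sketch with the default knobs (950ms runtime per 1s
 * period) and a single missed period (overrun == 1): a throttled
 * rt_rq that accrued rt_time == 950ms has overrun * runtime == 950ms
 * subtracted above, so rt_time drops below runtime, rt_throttled is
 * cleared and the queue is re-enqueued for the next period.
 */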

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                printk_sched("sched: RT throttling activated\n");
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}
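
/*
 * Example: with the default sysctls (sched_rt_runtime_us == 950000,
 * sched_rt_period_us == 1000000), an rt_rq is throttled here once it
 * has consumed more than 950ms of CPU inside the current 1s period,
 * leaving roughly 50ms for non-RT tasks.
 */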

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (curr->sched_class != &rt_sched_class)
                return;

        delta_exec = rq->clock_task - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq->clock_task;
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

        inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {

                WARN_ON(prio < prev_prio);

                /*
                 * This may have been our highest task, and therefore
                 * we may have some recomputation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }

        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;

        dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        int prio = rt_se_prio(rt_se);

        WARN_ON(!rt_prio(prio));
        rt_rq->rt_nr_running++;

        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
        inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;

        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
        dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        if (!rt_rq->rt_nr_running)
                list_add_leaf_rt_rq(rt_rq);

        if (head)
                list_add(&rt_se->run_list, queue);
        else
                list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
        if (!rt_rq->rt_nr_running)
                list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}
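
/*
 * Illustration: for a task inside group B, itself a child of group A,
 * the entity stack is task -> B -> A.  The first loop records the
 * reverse (->back) links, the second dequeues A, then B, then the
 * task, i.e. top-down as the comment above requires.
 */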

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dequeue_pushable_task(rq, p);

        dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}
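
/*
 * Behaviour sketch: sched_yield() from a SCHED_FIFO or SCHED_RR task
 * ends up here; requeueing at the tail (head == 0) lets any other
 * task queued at the same priority run next, while a task alone at
 * its priority simply keeps running.
 */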

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        cpu = task_cpu(p);

        if (p->nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues. If the woken
         * task is a higher priority, then it will stay on this CPU
         * and the lower prio task should be moved to another CPU.
         * Even though this will probably make the lower prio task
         * lose its cache, we do not want to bounce a higher-priority
         * task around just because it gave up its CPU, perhaps for a
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away.
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
            (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        if (rq->curr->nr_cpus_allowed == 1)
                return;

        if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appear to be other CPUs that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu.  If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (!rt_rq->rt_nr_running)
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock_task;

        return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct task_struct *p = _pick_next_task_rt(rq);

        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
        /*
         * We detect this state here so that we can avoid taking the RQ
         * lock again later if there is no need to push
         */
        rq->post_schedule = has_pushable_tasks(rq);
#endif

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
            (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio <= idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p;

                        if (!rt_entity_is_task(rt_se))
                                continue;

                        p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpumask_test_cpu(cpu, lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (!cpumask_test_cpu(this_cpu, lowest_mask))
                this_cpu = -1; /* Skip this_cpu opt if not among lowest */

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        int best_cpu;

                        /*
                         * "this_cpu" is cheaper to preempt than a
                         * remote processor.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        best_cpu = cpumask_first_and(lowest_mask,
                                                     sched_domain_span(sd));
                        if (best_cpu < nr_cpu_ids) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(lowest_mask);
        if (cpu < nr_cpu_ids)
                return cpu;
        return -1;
}
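
/*
 * Selection order recap, as a sketch: cpupri_find() fills lowest_mask
 * with the CPUs whose top priority is below @task's, then we pick
 * (1) the task's previous CPU if it is in the mask (cache-hot),
 * (2) a mask CPU sharing an SD_WAKE_AFFINE domain, preferring
 *     this_cpu,
 * (3) any CPU from the mask, else -1.
 */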

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);

                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
                         * We had to unlock the run queue. In
                         * the meantime, the task could have
                         * migrated already or had its affinity changed.
                         * Also make sure that it wasn't scheduled on its rq.
                         */
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
                                     !task->on_rq)) {

                                double_unlock_balance(rq, lowest_rq);
                                lowest_rq = NULL;
                                break;
                        }
                }

                /* If this rq is still suitable use it. */
                if (lowest_rq->rt.highest_prio.curr > task->prio)
                        break;

                /* try again */
                double_unlock_balance(rq, lowest_rq);
                lowest_rq = NULL;
        }

        return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_tasks(rq))
                return NULL;

        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));

        return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;
        int ret = 0;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_pushable_task(rq);
        if (!next_task)
                return 0;

#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        if (unlikely(task_running(rq, next_task)))
                return 0;
#endif

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * It's possible that the next_task slipped in at a
         * higher priority than current. If that's the case
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* find_lock_lowest_rq locks the rq if found */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq) {
                struct task_struct *task;
                /*
                 * find_lock_lowest_rq releases rq->lock
                 * so it is possible that next_task has migrated.
                 *
                 * We need to make sure that the task is still on the same
                 * run-queue and is also still the next task eligible for
                 * pushing.
                 */
                task = pick_next_pushable_task(rq);
                if (task_cpu(next_task) == rq->cpu && task == next_task) {
                        /*
                         * The task hasn't migrated, and is still the next
                         * eligible task, but we failed to find a run-queue
                         * to push it to.  Do not retry in this case, since
                         * other cpus will pull from us when ready.
                         */
                        goto out;
                }

                if (!task)
                        /* No more tasks, just exit */
                        goto out;

                /*
                 * Something has shifted, try again.
                 */
                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
        ret = 1;

        resched_task(lowest_rq->curr);

        double_unlock_balance(rq, lowest_rq);

out:
        put_task_struct(next_task);

        return ret;
}

static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task will return true if it moved an RT */
        while (push_rt_task(rq))
                ;
}
1715
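/*
 * Walk the RT-overloaded CPUs in this root domain and try to pull over
 * any queued RT task whose priority beats what this runqueue is about
 * to run.  Returns 1 if at least one task was pulled.
 */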
1716static int pull_rt_task(struct rq *this_rq)
1717{
1718        int this_cpu = this_rq->cpu, ret = 0, cpu;
1719        struct task_struct *p;
1720        struct rq *src_rq;
1721
1722        if (likely(!rt_overloaded(this_rq)))
1723                return 0;
1724
1725        for_each_cpu(cpu, this_rq->rd->rto_mask) {
1726                if (this_cpu == cpu)
1727                        continue;
1728
1729                src_rq = cpu_rq(cpu);
1730
1731                /*
1732                 * Don't bother taking the src_rq->lock if the next highest
1733                 * task is known to be lower-priority than our current task.
1734                 * This may look racy, but if this value is about to go
1735                 * logically higher, the src_rq will push this task away.
1736                 * And if it's going logically lower, we do not care.
1737                 */
1738                if (src_rq->rt.highest_prio.next >=
1739                    this_rq->rt.highest_prio.curr)
1740                        continue;
1741
1742                /*
1743                 * We can potentially drop this_rq's lock in
1744                 * double_lock_balance, and another CPU could
1745                 * alter this_rq
1746                 */
1747                double_lock_balance(this_rq, src_rq);
1748
1749                /*
1750                 * Are there still pullable RT tasks?
1751                 */
1752                if (src_rq->rt.rt_nr_running <= 1)
1753                        goto skip;
1754
1755                p = pick_next_highest_task_rt(src_rq, this_cpu);
1756
1757                /*
1758                 * Do we have an RT task that preempts
1759                 * the to-be-scheduled task?
1760                 */
1761                if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1762                        WARN_ON(p == src_rq->curr);
1763                        WARN_ON(!p->on_rq);
1764
1765                        /*
1766                         * There's a chance that p is higher in priority
1767                         * than what's currently running on its cpu.
1768                         * This is just that p is waking up and hasn't
1769                         * had a chance to schedule yet. We only pull
1770                         * p if it is lower in priority than the
1771                         * current task on its run queue.
1772                         */
1773                        if (p->prio < src_rq->curr->prio)
1774                                goto skip;
1775
1776                        ret = 1;
1777
1778                        deactivate_task(src_rq, p, 0);
1779                        set_task_cpu(p, this_cpu);
1780                        activate_task(this_rq, p, 0);
1781                        /*
1782                         * We continue with the search, just in
1783                         * case there's an even higher prio task
1784                         * in another runqueue. (low likelihood
1785                         * but possible)
1786                         */
1787                }
1788skip:
1789                double_unlock_balance(this_rq, src_rq);
1790        }
1791
1792        return ret;
1793}
1794
1795static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1796{
1797        /* Try to pull RT tasks here if we lower this rq's prio */
1798        if (rq->rt.highest_prio.curr > prev->prio)
1799                pull_rt_task(rq);
1800}
1801
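/*
 * We just finished a context switch: push any queued RT tasks that can
 * run on other, lower-priority CPUs.
 */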
1802static void post_schedule_rt(struct rq *rq)
1803{
1804        push_rt_tasks(rq);
1805}
1806
1807/*
1808 * If we are not running and we are not going to reschedule soon, we should
1809 * try to push tasks away now
1810 */
1811static void task_woken_rt(struct rq *rq, struct task_struct *p)
1812{
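        /*
         * Only bother pushing if: p is not already running, the current
         * task is not about to reschedule anyway, there is something on
         * the pushable list, p can actually migrate, and the current
         * task is an RT task that is either pinned to this CPU or at
         * least as high in priority as p.
         */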
1813        if (!task_running(rq, p) &&
1814            !test_tsk_need_resched(rq->curr) &&
1815            has_pushable_tasks(rq) &&
1816            p->nr_cpus_allowed > 1 &&
1817            rt_task(rq->curr) &&
1818            (rq->curr->nr_cpus_allowed < 2 ||
1819             rq->curr->prio <= p->prio))
1820                push_rt_tasks(rq);
1821}
1822
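/*
 * The task's allowed-CPU mask is changing: keep the pushable-task list
 * and the rq's migratory-task count consistent with the new mask.
 */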
1823static void set_cpus_allowed_rt(struct task_struct *p,
1824                                const struct cpumask *new_mask)
1825{
1826        struct rq *rq;
1827        int weight;
1828
1829        BUG_ON(!rt_task(p));
1830
1831        if (!p->on_rq)
1832                return;
1833
1834        weight = cpumask_weight(new_mask);
1835
1836        /*
1837         * Only update the accounting if the task's migratability
1838         * actually changes (pinned <-> migratable).
1839         */
1840        if ((p->nr_cpus_allowed > 1) == (weight > 1))
1841                return;
1842
1843        rq = task_rq(p);
1844
1845        /*
1846         * The task either lost the ability to migrate or just gained it.
1847         */
1848        if (weight <= 1) {
1849                if (!task_current(rq, p))
1850                        dequeue_pushable_task(rq, p);
1851                BUG_ON(!rq->rt.rt_nr_migratory);
1852                rq->rt.rt_nr_migratory--;
1853        } else {
1854                if (!task_current(rq, p))
1855                        enqueue_pushable_task(rq, p);
1856                rq->rt.rt_nr_migratory++;
1857        }
1858
1859        update_rt_migration(&rq->rt);
1860}
1861
1862/* Assumes rq->lock is held */
1863static void rq_online_rt(struct rq *rq)
1864{
1865        if (rq->rt.overloaded)
1866                rt_set_overload(rq);
1867
1868        __enable_runtime(rq);
1869
1870        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1871}
1872
1873/* Assumes rq->lock is held */
1874static void rq_offline_rt(struct rq *rq)
1875{
1876        if (rq->rt.overloaded)
1877                rt_clear_overload(rq);
1878
1879        __disable_runtime(rq);
1880
1881        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1882}
1883
1884/*
1885 * When switching away from the RT queue, we bring ourselves to a
1886 * position where we might want to pull RT tasks from other runqueues.
1887 */
1888static void switched_from_rt(struct rq *rq, struct task_struct *p)
1889{
1890        /*
1891         * If there are other RT tasks then we will reschedule
1892         * and the scheduling of the other RT tasks will handle
1893         * the balancing. But if we are the last RT task
1894         * we may need to handle the pulling of RT tasks
1895         * now.
1896         */
1897        if (p->on_rq && !rq->rt.rt_nr_running)
1898                pull_rt_task(rq);
1899}
1900
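/*
 * Allocate the per-CPU cpumask that the push/pull logic uses when
 * searching for a lower-priority CPU to run a task on.
 */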
1901void init_sched_rt_class(void)
1902{
1903        unsigned int i;
1904
1905        for_each_possible_cpu(i) {
1906                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1907                                        GFP_KERNEL, cpu_to_node(i));
1908        }
1909}
1910#endif /* CONFIG_SMP */
1911
1912/*
1913 * When switching a task to RT, we may overload the runqueue
1914 * with RT tasks. In this case we try to push them off to
1915 * other runqueues.
1916 */
1917static void switched_to_rt(struct rq *rq, struct task_struct *p)
1918{
1919        int check_resched = 1;
1920
1921        /*
1922         * If we are already running, then there's nothing
1923         * that needs to be done. But if we are not running
1924         * we may need to preempt the current running task.
1925         * If that current running task is also an RT task
1926         * then see if we can move to another run queue.
1927         */
1928        if (p->on_rq && rq->curr != p) {
1929#ifdef CONFIG_SMP
1930                if (rq->rt.overloaded && push_rt_task(rq) &&
1931                    /* Don't resched if we changed runqueues */
1932                    rq != task_rq(p))
1933                        check_resched = 0;
1934#endif /* CONFIG_SMP */
1935                if (check_resched && p->prio < rq->curr->prio)
1936                        resched_task(rq->curr);
1937        }
1938}
1939
1940/*
1941 * Priority of the task has changed. This may cause
1942 * us to initiate a push or pull.
1943 */
1944static void
1945prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1946{
1947        if (!p->on_rq)
1948                return;
1949
1950        if (rq->curr == p) {
1951#ifdef CONFIG_SMP
1952                /*
1953                 * If our priority decreases while running, we
1954                 * may need to pull tasks to this runqueue.
1955                 */
1956                if (oldprio < p->prio)
1957                        pull_rt_task(rq);
1958                /*
1959                 * If there's a higher priority task waiting to run
1960                 * then reschedule. Note, the above pull_rt_task
1961                 * can release the rq lock and p could migrate.
1962                 * Only reschedule if p is still on the same runqueue.
1963                 */
1964                if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1965                        resched_task(p);
1966#else
1967                /* For UP simply resched on drop of prio */
1968                if (oldprio < p->prio)
1969                        resched_task(p);
1970#endif /* CONFIG_SMP */
1971        } else {
1972                /*
1973                 * This task is not running, but if it is
1974                 * greater than the current running task
1975                 * then reschedule.
1976                 */
1977                if (p->prio < rq->curr->prio)
1978                        resched_task(rq->curr);
1979        }
1980}
1981
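/*
 * RLIMIT_RTTIME watchdog: every tick spent running bumps p->rt.timeout,
 * and once that exceeds the (tick-converted) soft limit we arm the
 * per-thread sched_exp expiry so the rlimit can be enforced.
 */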
1982static void watchdog(struct rq *rq, struct task_struct *p)
1983{
1984        unsigned long soft, hard;
1985
1986        /* the hard limit (rlim_max) may change after the soft limit (rlim_cur) was read; this is fixed up on the next tick */
1987        soft = task_rlimit(p, RLIMIT_RTTIME);
1988        hard = task_rlimit_max(p, RLIMIT_RTTIME);
1989
1990        if (soft != RLIM_INFINITY) {
1991                unsigned long next;
1992
1993                p->rt.timeout++;
1994                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1995                if (p->rt.timeout > next)
1996                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1997        }
1998}
1999
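/*
 * Per-tick work for an RT task: update runtime accounting, run the
 * RLIMIT_RTTIME watchdog and, for SCHED_RR, handle timeslice expiry by
 * requeueing the task at the end of its priority queue.
 */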
2000static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2001{
2002        struct sched_rt_entity *rt_se = &p->rt;
2003
2004        update_curr_rt(rq);
2005
2006        watchdog(rq, p);
2007
2008        /*
2009         * RR tasks need a special form of timeslice management.
2010         * FIFO tasks have no timeslices.
2011         */
2012        if (p->policy != SCHED_RR)
2013                return;
2014
2015        if (--p->rt.time_slice)
2016                return;
2017
2018        p->rt.time_slice = RR_TIMESLICE;
2019
2020        /*
2021         * Requeue to the end of queue if we (and all of our ancestors) are not
2022         * the only element on the queue
2023         */
2024        for_each_sched_rt_entity(rt_se) {
2025                if (rt_se->run_list.prev != rt_se->run_list.next) {
2026                        requeue_task_rt(rq, p, 0);
2027                        set_tsk_need_resched(p);
2028                        return;
2029                }
2030        }
2031}
2032
2033static void set_curr_task_rt(struct rq *rq)
2034{
2035        struct task_struct *p = rq->curr;
2036
2037        p->se.exec_start = rq->clock_task;
2038
2039        /* The running task is never eligible for pushing */
2040        dequeue_pushable_task(rq, p);
2041}
2042
2043static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2044{
2045        /*
2046         * Time slice is 0 for SCHED_FIFO tasks
2047         */
2048        if (task->policy == SCHED_RR)
2049                return RR_TIMESLICE;
2050        else
2051                return 0;
2052}
2053
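/*
 * Method table for the RT scheduling class; fair_sched_class is the
 * next lower class in the pick order.
 */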
2054const struct sched_class rt_sched_class = {
2055        .next                   = &fair_sched_class,
2056        .enqueue_task           = enqueue_task_rt,
2057        .dequeue_task           = dequeue_task_rt,
2058        .yield_task             = yield_task_rt,
2059
2060        .check_preempt_curr     = check_preempt_curr_rt,
2061
2062        .pick_next_task         = pick_next_task_rt,
2063        .put_prev_task          = put_prev_task_rt,
2064
2065#ifdef CONFIG_SMP
2066        .select_task_rq         = select_task_rq_rt,
2067
2068        .set_cpus_allowed       = set_cpus_allowed_rt,
2069        .rq_online              = rq_online_rt,
2070        .rq_offline             = rq_offline_rt,
2071        .pre_schedule           = pre_schedule_rt,
2072        .post_schedule          = post_schedule_rt,
2073        .task_woken             = task_woken_rt,
2074        .switched_from          = switched_from_rt,
2075#endif
2076
2077        .set_curr_task          = set_curr_task_rt,
2078        .task_tick              = task_tick_rt,
2079
2080        .get_rr_interval        = get_rr_interval_rt,
2081
2082        .prio_changed           = prio_changed_rt,
2083        .switched_to            = switched_to_rt,
2084};
2085
2086#ifdef CONFIG_SCHED_DEBUG
2087extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2088
2089void print_rt_stats(struct seq_file *m, int cpu)
2090{
2091        rt_rq_iter_t iter;
2092        struct rt_rq *rt_rq;
2093
2094        rcu_read_lock();
2095        for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2096                print_rt_rq(m, cpu, rt_rq);
2097        rcu_read_unlock();
2098}
2099#endif /* CONFIG_SCHED_DEBUG */
2100