linux/kernel/sched_rt.c
   1/*
   2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
   3 * policies)
   4 */
   5
   6#ifdef CONFIG_SMP
   7
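/*
 * Overload tracking: the root domain keeps an atomic count (rto_count)
 * and a cpumask (rto_mask) of runqueues that currently have more
 * runnable RT tasks than they can run at once, so the push/pull logic
 * knows where to look.
 */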
   8static inline int rt_overloaded(struct rq *rq)
   9{
  10        return atomic_read(&rq->rd->rto_count);
  11}
  12
  13static inline void rt_set_overload(struct rq *rq)
  14{
  15        if (!rq->online)
  16                return;
  17
  18        cpu_set(rq->cpu, rq->rd->rto_mask);
  19        /*
  20         * Make sure the mask is visible before we set
  21         * the overload count. That is checked to determine
  22         * if we should look at the mask. It would be a shame
  23         * if we looked at the mask, but the mask was not
  24         * updated yet.
  25         */
  26        wmb();
  27        atomic_inc(&rq->rd->rto_count);
  28}
  29
  30static inline void rt_clear_overload(struct rq *rq)
  31{
  32        if (!rq->online)
  33                return;
  34
  35        /* the order here really doesn't matter */
  36        atomic_dec(&rq->rd->rto_count);
  37        cpu_clear(rq->cpu, rq->rd->rto_mask);
  38}
  39
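/*
 * Keep rq->rt.overloaded and the root-domain overload mask in sync:
 * a runqueue counts as overloaded when it has more than one runnable
 * RT task and at least one of them may migrate elsewhere.
 */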
  40static void update_rt_migration(struct rq *rq)
  41{
  42        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
  43                if (!rq->rt.overloaded) {
  44                        rt_set_overload(rq);
  45                        rq->rt.overloaded = 1;
  46                }
  47        } else if (rq->rt.overloaded) {
  48                rt_clear_overload(rq);
  49                rq->rt.overloaded = 0;
  50        }
  51}
  52#endif /* CONFIG_SMP */
  53
  54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
  55{
  56        return container_of(rt_se, struct task_struct, rt);
  57}
  58
  59static inline int on_rt_rq(struct sched_rt_entity *rt_se)
  60{
  61        return !list_empty(&rt_se->run_list);
  62}
  63
  64#ifdef CONFIG_RT_GROUP_SCHED
  65
  66static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
  67{
  68        if (!rt_rq->tg)
  69                return RUNTIME_INF;
  70
  71        return rt_rq->rt_runtime;
  72}
  73
  74static inline u64 sched_rt_period(struct rt_rq *rt_rq)
  75{
  76        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
  77}
  78
  79#define for_each_leaf_rt_rq(rt_rq, rq) \
  80        list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
  81
  82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
  83{
  84        return rt_rq->rq;
  85}
  86
  87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
  88{
  89        return rt_se->rt_rq;
  90}
  91
  92#define for_each_sched_rt_entity(rt_se) \
  93        for (; rt_se; rt_se = rt_se->parent)
  94
  95static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
  96{
  97        return rt_se->my_q;
  98}
  99
 100static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 101static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 102
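/*
 * (Re)enqueue the entity representing this group rt_rq in its parent,
 * e.g. after the group has been unthrottled, and preempt the running
 * task if the group now carries a higher priority.
 */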
 103static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 104{
 105        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 106        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 107
 108        if (rt_rq->rt_nr_running) {
 109                if (rt_se && !on_rt_rq(rt_se))
 110                        enqueue_rt_entity(rt_se);
 111                if (rt_rq->highest_prio < curr->prio)
 112                        resched_task(curr);
 113        }
 114}
 115
 116static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 117{
 118        struct sched_rt_entity *rt_se = rt_rq->rt_se;
 119
 120        if (rt_se && on_rt_rq(rt_se))
 121                dequeue_rt_entity(rt_se);
 122}
 123
 124static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 125{
 126        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
 127}
 128
 129static int rt_se_boosted(struct sched_rt_entity *rt_se)
 130{
 131        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 132        struct task_struct *p;
 133
 134        if (rt_rq)
 135                return !!rt_rq->rt_nr_boosted;
 136
 137        p = rt_task_of(rt_se);
 138        return p->prio != p->normal_prio;
 139}
 140
 141#ifdef CONFIG_SMP
 142static inline cpumask_t sched_rt_period_mask(void)
 143{
 144        return cpu_rq(smp_processor_id())->rd->span;
 145}
 146#else
 147static inline cpumask_t sched_rt_period_mask(void)
 148{
 149        return cpu_online_map;
 150}
 151#endif
 152
 153static inline
 154struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 155{
 156        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
 157}
 158
 159static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 160{
 161        return &rt_rq->tg->rt_bandwidth;
 162}
 163
 164#else /* !CONFIG_RT_GROUP_SCHED */
 165
 166static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 167{
 168        return rt_rq->rt_runtime;
 169}
 170
 171static inline u64 sched_rt_period(struct rt_rq *rt_rq)
 172{
 173        return ktime_to_ns(def_rt_bandwidth.rt_period);
 174}
 175
 176#define for_each_leaf_rt_rq(rt_rq, rq) \
 177        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
 178
 179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 180{
 181        return container_of(rt_rq, struct rq, rt);
 182}
 183
 184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 185{
 186        struct task_struct *p = rt_task_of(rt_se);
 187        struct rq *rq = task_rq(p);
 188
 189        return &rq->rt;
 190}
 191
 192#define for_each_sched_rt_entity(rt_se) \
 193        for (; rt_se; rt_se = NULL)
 194
 195static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 196{
 197        return NULL;
 198}
 199
 200static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 201{
 202        if (rt_rq->rt_nr_running)
 203                resched_task(rq_of_rt_rq(rt_rq)->curr);
 204}
 205
 206static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 207{
 208}
 209
 210static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 211{
 212        return rt_rq->rt_throttled;
 213}
 214
 215static inline cpumask_t sched_rt_period_mask(void)
 216{
 217        return cpu_online_map;
 218}
 219
 220static inline
 221struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
 222{
 223        return &cpu_rq(cpu)->rt;
 224}
 225
 226static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 227{
 228        return &def_rt_bandwidth;
 229}
 230
 231#endif /* CONFIG_RT_GROUP_SCHED */
 232
 233#ifdef CONFIG_SMP
 234/*
 235 * We ran out of runtime, see if we can borrow some from our neighbours.
 236 */
 237static int do_balance_runtime(struct rt_rq *rt_rq)
 238{
 239        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 240        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 241        int i, weight, more = 0;
 242        u64 rt_period;
 243
 244        weight = cpus_weight(rd->span);
 245
 246        spin_lock(&rt_b->rt_runtime_lock);
 247        rt_period = ktime_to_ns(rt_b->rt_period);
 248        for_each_cpu_mask_nr(i, rd->span) {
 249                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 250                s64 diff;
 251
 252                if (iter == rt_rq)
 253                        continue;
 254
 255                spin_lock(&iter->rt_runtime_lock);
 256                /*
 257                 * Either all rqs have inf runtime and there's nothing to steal
 258                 * or __disable_runtime() below sets a specific rq to inf to
  259                 * indicate it's been disabled and disallow stealing.
 260                 */
 261                if (iter->rt_runtime == RUNTIME_INF)
 262                        goto next;
 263
 264                /*
 265                 * From runqueues with spare time, take 1/n part of their
 266                 * spare time, but no more than our period.
 267                 */
 268                diff = iter->rt_runtime - iter->rt_time;
 269                if (diff > 0) {
 270                        diff = div_u64((u64)diff, weight);
 271                        if (rt_rq->rt_runtime + diff > rt_period)
 272                                diff = rt_period - rt_rq->rt_runtime;
 273                        iter->rt_runtime -= diff;
 274                        rt_rq->rt_runtime += diff;
 275                        more = 1;
 276                        if (rt_rq->rt_runtime == rt_period) {
 277                                spin_unlock(&iter->rt_runtime_lock);
 278                                break;
 279                        }
 280                }
 281next:
 282                spin_unlock(&iter->rt_runtime_lock);
 283        }
 284        spin_unlock(&rt_b->rt_runtime_lock);
 285
 286        return more;
 287}
 288
 289/*
  290 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 291 */
 292static void __disable_runtime(struct rq *rq)
 293{
 294        struct root_domain *rd = rq->rd;
 295        struct rt_rq *rt_rq;
 296
 297        if (unlikely(!scheduler_running))
 298                return;
 299
 300        for_each_leaf_rt_rq(rt_rq, rq) {
 301                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 302                s64 want;
 303                int i;
 304
 305                spin_lock(&rt_b->rt_runtime_lock);
 306                spin_lock(&rt_rq->rt_runtime_lock);
 307                /*
 308                 * Either we're all inf and nobody needs to borrow, or we're
 309                 * already disabled and thus have nothing to do, or we have
 310                 * exactly the right amount of runtime to take out.
 311                 */
 312                if (rt_rq->rt_runtime == RUNTIME_INF ||
 313                                rt_rq->rt_runtime == rt_b->rt_runtime)
 314                        goto balanced;
 315                spin_unlock(&rt_rq->rt_runtime_lock);
 316
 317                /*
 318                 * Calculate the difference between what we started out with
  319                 * and what we currently have; that's the amount of runtime
  320                 * we lent out and now have to reclaim.
 321                 */
 322                want = rt_b->rt_runtime - rt_rq->rt_runtime;
 323
 324                /*
 325                 * Greedy reclaim, take back as much as we can.
 326                 */
 327                for_each_cpu_mask(i, rd->span) {
 328                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 329                        s64 diff;
 330
 331                        /*
 332                         * Can't reclaim from ourselves or disabled runqueues.
 333                         */
 334                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 335                                continue;
 336
 337                        spin_lock(&iter->rt_runtime_lock);
 338                        if (want > 0) {
 339                                diff = min_t(s64, iter->rt_runtime, want);
 340                                iter->rt_runtime -= diff;
 341                                want -= diff;
 342                        } else {
 343                                iter->rt_runtime -= want;
 344                                want -= want;
 345                        }
 346                        spin_unlock(&iter->rt_runtime_lock);
 347
 348                        if (!want)
 349                                break;
 350                }
 351
 352                spin_lock(&rt_rq->rt_runtime_lock);
 353                /*
 354                 * We cannot be left wanting - that would mean some runtime
 355                 * leaked out of the system.
 356                 */
 357                BUG_ON(want);
 358balanced:
 359                /*
 360                 * Disable all the borrow logic by pretending we have inf
 361                 * runtime - in which case borrowing doesn't make sense.
 362                 */
 363                rt_rq->rt_runtime = RUNTIME_INF;
 364                spin_unlock(&rt_rq->rt_runtime_lock);
 365                spin_unlock(&rt_b->rt_runtime_lock);
 366        }
 367}
 368
 369static void disable_runtime(struct rq *rq)
 370{
 371        unsigned long flags;
 372
 373        spin_lock_irqsave(&rq->lock, flags);
 374        __disable_runtime(rq);
 375        spin_unlock_irqrestore(&rq->lock, flags);
 376}
 377
 378static void __enable_runtime(struct rq *rq)
 379{
 380        struct rt_rq *rt_rq;
 381
 382        if (unlikely(!scheduler_running))
 383                return;
 384
 385        /*
 386         * Reset each runqueue's bandwidth settings
 387         */
 388        for_each_leaf_rt_rq(rt_rq, rq) {
 389                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 390
 391                spin_lock(&rt_b->rt_runtime_lock);
 392                spin_lock(&rt_rq->rt_runtime_lock);
 393                rt_rq->rt_runtime = rt_b->rt_runtime;
 394                rt_rq->rt_time = 0;
 395                rt_rq->rt_throttled = 0;
 396                spin_unlock(&rt_rq->rt_runtime_lock);
 397                spin_unlock(&rt_b->rt_runtime_lock);
 398        }
 399}
 400
 401static void enable_runtime(struct rq *rq)
 402{
 403        unsigned long flags;
 404
 405        spin_lock_irqsave(&rq->lock, flags);
 406        __enable_runtime(rq);
 407        spin_unlock_irqrestore(&rq->lock, flags);
 408}
 409
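/*
 * Called from the charge path with rt_rq->rt_runtime_lock held: only
 * try to borrow when we have actually overrun, and drop/retake the
 * lock around the cross-CPU walk in do_balance_runtime().
 */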
 410static int balance_runtime(struct rt_rq *rt_rq)
 411{
 412        int more = 0;
 413
 414        if (rt_rq->rt_time > rt_rq->rt_runtime) {
 415                spin_unlock(&rt_rq->rt_runtime_lock);
 416                more = do_balance_runtime(rt_rq);
 417                spin_lock(&rt_rq->rt_runtime_lock);
 418        }
 419
 420        return more;
 421}
 422#else /* !CONFIG_SMP */
 423static inline int balance_runtime(struct rt_rq *rt_rq)
 424{
 425        return 0;
 426}
 427#endif /* CONFIG_SMP */
 428
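/*
 * Runs from the rt_bandwidth period timer: decay rt_time on every
 * rt_rq covered by the period mask and unthrottle queues that have
 * dropped back below their runtime.  As a rough illustration (numbers
 * are typical defaults, not taken from this file): with a 1s period
 * and 950ms of runtime, an rt_rq that burned its 950ms stays throttled
 * until this handler refreshes it and re-enqueues its entities.
 */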
 429static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 430{
 431        int i, idle = 1;
 432        cpumask_t span;
 433
 434        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 435                return 1;
 436
 437        span = sched_rt_period_mask();
 438        for_each_cpu_mask(i, span) {
 439                int enqueue = 0;
 440                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 441                struct rq *rq = rq_of_rt_rq(rt_rq);
 442
 443                spin_lock(&rq->lock);
 444                if (rt_rq->rt_time) {
 445                        u64 runtime;
 446
 447                        spin_lock(&rt_rq->rt_runtime_lock);
 448                        if (rt_rq->rt_throttled)
 449                                balance_runtime(rt_rq);
 450                        runtime = rt_rq->rt_runtime;
 451                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
 452                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 453                                rt_rq->rt_throttled = 0;
 454                                enqueue = 1;
 455                        }
 456                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
 457                                idle = 0;
 458                        spin_unlock(&rt_rq->rt_runtime_lock);
 459                } else if (rt_rq->rt_nr_running)
 460                        idle = 0;
 461
 462                if (enqueue)
 463                        sched_rt_rq_enqueue(rt_rq);
 464                spin_unlock(&rq->lock);
 465        }
 466
 467        return idle;
 468}
 469
 470static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 471{
 472#ifdef CONFIG_RT_GROUP_SCHED
 473        struct rt_rq *rt_rq = group_rt_rq(rt_se);
 474
 475        if (rt_rq)
 476                return rt_rq->highest_prio;
 477#endif
 478
 479        return rt_task_of(rt_se)->prio;
 480}
 481
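/*
 * Charge-path check: after trying to borrow runtime from neighbours,
 * decide whether this rt_rq has exceeded its allotted runtime for the
 * current period.  Returns 1 (and dequeues the group) if it has to be
 * throttled, 0 otherwise.
 */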
 482static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 483{
 484        u64 runtime = sched_rt_runtime(rt_rq);
 485
 486        if (rt_rq->rt_throttled)
 487                return rt_rq_throttled(rt_rq);
 488
 489        if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
 490                return 0;
 491
 492        balance_runtime(rt_rq);
 493        runtime = sched_rt_runtime(rt_rq);
 494        if (runtime == RUNTIME_INF)
 495                return 0;
 496
 497        if (rt_rq->rt_time > runtime) {
 498                rt_rq->rt_throttled = 1;
 499                if (rt_rq_throttled(rt_rq)) {
 500                        sched_rt_rq_dequeue(rt_rq);
 501                        return 1;
 502                }
 503        }
 504
 505        return 0;
 506}
 507
 508/*
 509 * Update the current task's runtime statistics. Skip current tasks that
 510 * are not in our scheduling class.
 511 */
 512static void update_curr_rt(struct rq *rq)
 513{
 514        struct task_struct *curr = rq->curr;
 515        struct sched_rt_entity *rt_se = &curr->rt;
 516        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 517        u64 delta_exec;
 518
 519        if (!task_has_rt_policy(curr))
 520                return;
 521
 522        delta_exec = rq->clock - curr->se.exec_start;
 523        if (unlikely((s64)delta_exec < 0))
 524                delta_exec = 0;
 525
 526        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 527
 528        curr->se.sum_exec_runtime += delta_exec;
 529        account_group_exec_runtime(curr, delta_exec);
 530
 531        curr->se.exec_start = rq->clock;
 532        cpuacct_charge(curr, delta_exec);
 533
 534        if (!rt_bandwidth_enabled())
 535                return;
 536
 537        for_each_sched_rt_entity(rt_se) {
 538                rt_rq = rt_rq_of_se(rt_se);
 539
 540                spin_lock(&rt_rq->rt_runtime_lock);
 541                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 542                        rt_rq->rt_time += delta_exec;
 543                        if (sched_rt_runtime_exceeded(rt_rq))
 544                                resched_task(curr);
 545                }
 546                spin_unlock(&rt_rq->rt_runtime_lock);
 547        }
 548}
 549
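/*
 * Accounting for an entity entering this rt_rq: running count, highest
 * queued priority (pushed out to cpupri on SMP), the migratory count
 * used by the overload logic, and group boost/bandwidth state.
 */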
 550static inline
 551void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 552{
 553        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 554        rt_rq->rt_nr_running++;
 555#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 556        if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
 557#ifdef CONFIG_SMP
 558                struct rq *rq = rq_of_rt_rq(rt_rq);
 559#endif
 560
 561                rt_rq->highest_prio = rt_se_prio(rt_se);
 562#ifdef CONFIG_SMP
 563                if (rq->online)
 564                        cpupri_set(&rq->rd->cpupri, rq->cpu,
 565                                   rt_se_prio(rt_se));
 566#endif
 567        }
 568#endif
 569#ifdef CONFIG_SMP
 570        if (rt_se->nr_cpus_allowed > 1) {
 571                struct rq *rq = rq_of_rt_rq(rt_rq);
 572
 573                rq->rt.rt_nr_migratory++;
 574        }
 575
 576        update_rt_migration(rq_of_rt_rq(rt_rq));
 577#endif
 578#ifdef CONFIG_RT_GROUP_SCHED
 579        if (rt_se_boosted(rt_se))
 580                rt_rq->rt_nr_boosted++;
 581
 582        if (rt_rq->tg)
 583                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
 584#else
 585        start_rt_bandwidth(&def_rt_bandwidth);
 586#endif
 587}
 588
 589static inline
 590void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 591{
 592#ifdef CONFIG_SMP
 593        int highest_prio = rt_rq->highest_prio;
 594#endif
 595
 596        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 597        WARN_ON(!rt_rq->rt_nr_running);
 598        rt_rq->rt_nr_running--;
 599#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 600        if (rt_rq->rt_nr_running) {
 601                struct rt_prio_array *array;
 602
 603                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
 604                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
 605                        /* recalculate */
 606                        array = &rt_rq->active;
 607                        rt_rq->highest_prio =
 608                                sched_find_first_bit(array->bitmap);
  609                } /* otherwise leave rt_rq->highest_prio alone */
 610        } else
 611                rt_rq->highest_prio = MAX_RT_PRIO;
 612#endif
 613#ifdef CONFIG_SMP
 614        if (rt_se->nr_cpus_allowed > 1) {
 615                struct rq *rq = rq_of_rt_rq(rt_rq);
 616                rq->rt.rt_nr_migratory--;
 617        }
 618
 619        if (rt_rq->highest_prio != highest_prio) {
 620                struct rq *rq = rq_of_rt_rq(rt_rq);
 621
 622                if (rq->online)
 623                        cpupri_set(&rq->rd->cpupri, rq->cpu,
 624                                   rt_rq->highest_prio);
 625        }
 626
 627        update_rt_migration(rq_of_rt_rq(rt_rq));
 628#endif /* CONFIG_SMP */
 629#ifdef CONFIG_RT_GROUP_SCHED
 630        if (rt_se_boosted(rt_se))
 631                rt_rq->rt_nr_boosted--;
 632
 633        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
 634#endif
 635}
 636
 637static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
 638{
 639        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 640        struct rt_prio_array *array = &rt_rq->active;
 641        struct rt_rq *group_rq = group_rt_rq(rt_se);
 642        struct list_head *queue = array->queue + rt_se_prio(rt_se);
 643
 644        /*
  645         * Don't enqueue the group if it's throttled, or when it is empty.
  646         * The latter is a consequence of the former when a child group
  647         * gets throttled and the current group doesn't have any other
  648         * active members.
 649         */
 650        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
 651                return;
 652
 653        list_add_tail(&rt_se->run_list, queue);
 654        __set_bit(rt_se_prio(rt_se), array->bitmap);
 655
 656        inc_rt_tasks(rt_se, rt_rq);
 657}
 658
 659static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
 660{
 661        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 662        struct rt_prio_array *array = &rt_rq->active;
 663
 664        list_del_init(&rt_se->run_list);
 665        if (list_empty(array->queue + rt_se_prio(rt_se)))
 666                __clear_bit(rt_se_prio(rt_se), array->bitmap);
 667
 668        dec_rt_tasks(rt_se, rt_rq);
 669}
 670
 671/*
 672 * Because the prio of an upper entry depends on the lower
  673 * entries, we must remove entries top-down.
 674 */
 675static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 676{
 677        struct sched_rt_entity *back = NULL;
 678
 679        for_each_sched_rt_entity(rt_se) {
 680                rt_se->back = back;
 681                back = rt_se;
 682        }
 683
 684        for (rt_se = back; rt_se; rt_se = rt_se->back) {
 685                if (on_rt_rq(rt_se))
 686                        __dequeue_rt_entity(rt_se);
 687        }
 688}
 689
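/*
 * Enqueue/dequeue of a hierarchy: the whole stack is removed top-down
 * first, then each level is re-added bottom-up so parents see the
 * child's contribution (dequeue only re-adds groups that still have
 * runnable entities of their own).
 */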
 690static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 691{
 692        dequeue_rt_stack(rt_se);
 693        for_each_sched_rt_entity(rt_se)
 694                __enqueue_rt_entity(rt_se);
 695}
 696
 697static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 698{
 699        dequeue_rt_stack(rt_se);
 700
 701        for_each_sched_rt_entity(rt_se) {
 702                struct rt_rq *rt_rq = group_rt_rq(rt_se);
 703
 704                if (rt_rq && rt_rq->rt_nr_running)
 705                        __enqueue_rt_entity(rt_se);
 706        }
 707}
 708
 709/*
 710 * Adding/removing a task to/from a priority array:
 711 */
 712static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 713{
 714        struct sched_rt_entity *rt_se = &p->rt;
 715
 716        if (wakeup)
 717                rt_se->timeout = 0;
 718
 719        enqueue_rt_entity(rt_se);
 720
 721        inc_cpu_load(rq, p->se.load.weight);
 722}
 723
 724static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 725{
 726        struct sched_rt_entity *rt_se = &p->rt;
 727
 728        update_curr_rt(rq);
 729        dequeue_rt_entity(rt_se);
 730
 731        dec_cpu_load(rq, p->se.load.weight);
 732}
 733
 734/*
 735 * Put task to the end of the run list without the overhead of dequeue
 736 * followed by enqueue.
 737 */
 738static void
 739requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
 740{
 741        if (on_rt_rq(rt_se)) {
 742                struct rt_prio_array *array = &rt_rq->active;
 743                struct list_head *queue = array->queue + rt_se_prio(rt_se);
 744
 745                if (head)
 746                        list_move(&rt_se->run_list, queue);
 747                else
 748                        list_move_tail(&rt_se->run_list, queue);
 749        }
 750}
 751
 752static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
 753{
 754        struct sched_rt_entity *rt_se = &p->rt;
 755        struct rt_rq *rt_rq;
 756
 757        for_each_sched_rt_entity(rt_se) {
 758                rt_rq = rt_rq_of_se(rt_se);
 759                requeue_rt_entity(rt_rq, rt_se, head);
 760        }
 761}
 762
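/*
 * sched_yield() for RT: move current to the tail of its priority queue
 * so that other tasks of the same priority get to run.
 */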
 763static void yield_task_rt(struct rq *rq)
 764{
 765        requeue_task_rt(rq, rq->curr, 0);
 766}
 767
 768#ifdef CONFIG_SMP
 769static int find_lowest_rq(struct task_struct *task);
 770
 771static int select_task_rq_rt(struct task_struct *p, int sync)
 772{
 773        struct rq *rq = task_rq(p);
 774
 775        /*
 776         * If the current task is an RT task, then
 777         * try to see if we can wake this RT task up on another
 778         * runqueue. Otherwise simply start this RT task
 779         * on its current runqueue.
 780         *
  781         * We want to avoid overloading runqueues, even if the waking
  782         * RT task is of higher priority than the current RT task.
  783         * RT tasks behave differently from other tasks: if
  784         * one gets preempted, we try to push it off to another queue.
  785         * So trying to keep a preempting RT task on the same
  786         * cache-hot CPU will force the running RT task onto
  787         * a cold CPU, wasting all the cache of the lower-priority
  788         * RT task in hopes of saving some for an RT task
  789         * that is just being woken and probably has a
  790         * cold cache anyway.
 791         */
 792        if (unlikely(rt_task(rq->curr)) &&
 793            (p->rt.nr_cpus_allowed > 1)) {
 794                int cpu = find_lowest_rq(p);
 795
 796                return (cpu == -1) ? task_cpu(p) : cpu;
 797        }
 798
 799        /*
 800         * Otherwise, just let it ride on the affined RQ and the
 801         * post-schedule router will push the preempted task away
 802         */
 803        return task_cpu(p);
 804}
 805
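/*
 * Equal-priority wakeup handling: if current could run on some other
 * cpu but the waking task p could not, requeue p at the head and
 * reschedule so the push logic gets a chance to move current away.
 */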
 806static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 807{
 808        cpumask_t mask;
 809
 810        if (rq->curr->rt.nr_cpus_allowed == 1)
 811                return;
 812
 813        if (p->rt.nr_cpus_allowed != 1
 814            && cpupri_find(&rq->rd->cpupri, p, &mask))
 815                return;
 816
 817        if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
 818                return;
 819
 820        /*
  821         * There appear to be other cpus that can accept
  822         * current but none that can run 'p', so let's reschedule
 823         * to try and push current away:
 824         */
 825        requeue_task_rt(rq, p, 1);
 826        resched_task(rq->curr);
 827}
 828
 829#endif /* CONFIG_SMP */
 830
 831/*
 832 * Preempt the current task with a newly woken task if needed:
 833 */
 834static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
 835{
 836        if (p->prio < rq->curr->prio) {
 837                resched_task(rq->curr);
 838                return;
 839        }
 840
 841#ifdef CONFIG_SMP
 842        /*
 843         * If:
 844         *
 845         * - the newly woken task is of equal priority to the current task
 846         * - the newly woken task is non-migratable while current is migratable
 847         * - current will be preempted on the next reschedule
 848         *
 849         * we should check to see if current can readily move to a different
 850         * cpu.  If so, we will reschedule to allow the push logic to try
 851         * to move current somewhere else, making room for our non-migratable
 852         * task.
 853         */
 854        if (p->prio == rq->curr->prio && !need_resched())
 855                check_preempt_equal_prio(rq, p);
 856#endif
 857}
 858
 859static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
 860                                                   struct rt_rq *rt_rq)
 861{
 862        struct rt_prio_array *array = &rt_rq->active;
 863        struct sched_rt_entity *next = NULL;
 864        struct list_head *queue;
 865        int idx;
 866
 867        idx = sched_find_first_bit(array->bitmap);
 868        BUG_ON(idx >= MAX_RT_PRIO);
 869
 870        queue = array->queue + idx;
 871        next = list_entry(queue->next, struct sched_rt_entity, run_list);
 872
 873        return next;
 874}
 875
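/*
 * Walk from the top-level rt_rq down through the group hierarchy,
 * taking the first entity on the highest non-empty priority queue at
 * each level, until a task is reached.
 */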
 876static struct task_struct *pick_next_task_rt(struct rq *rq)
 877{
 878        struct sched_rt_entity *rt_se;
 879        struct task_struct *p;
 880        struct rt_rq *rt_rq;
 881
 882        rt_rq = &rq->rt;
 883
 884        if (unlikely(!rt_rq->rt_nr_running))
 885                return NULL;
 886
 887        if (rt_rq_throttled(rt_rq))
 888                return NULL;
 889
 890        do {
 891                rt_se = pick_next_rt_entity(rq, rt_rq);
 892                BUG_ON(!rt_se);
 893                rt_rq = group_rt_rq(rt_se);
 894        } while (rt_rq);
 895
 896        p = rt_task_of(rt_se);
 897        p->se.exec_start = rq->clock;
 898        return p;
 899}
 900
 901static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 902{
 903        update_curr_rt(rq);
 904        p->se.exec_start = 0;
 905}
 906
 907#ifdef CONFIG_SMP
 908
 909/* Only try algorithms three times */
 910#define RT_MAX_TRIES 3
 911
 912static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
 913static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
 914
 915static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 916
 917static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 918{
 919        if (!task_running(rq, p) &&
 920            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
 921            (p->rt.nr_cpus_allowed > 1))
 922                return 1;
 923        return 0;
 924}
 925
 926/* Return the second highest RT task, NULL otherwise */
 927static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 928{
 929        struct task_struct *next = NULL;
 930        struct sched_rt_entity *rt_se;
 931        struct rt_prio_array *array;
 932        struct rt_rq *rt_rq;
 933        int idx;
 934
 935        for_each_leaf_rt_rq(rt_rq, rq) {
 936                array = &rt_rq->active;
 937                idx = sched_find_first_bit(array->bitmap);
 938 next_idx:
 939                if (idx >= MAX_RT_PRIO)
 940                        continue;
 941                if (next && next->prio < idx)
 942                        continue;
 943                list_for_each_entry(rt_se, array->queue + idx, run_list) {
 944                        struct task_struct *p = rt_task_of(rt_se);
 945                        if (pick_rt_task(rq, p, cpu)) {
 946                                next = p;
 947                                break;
 948                        }
 949                }
 950                if (!next) {
 951                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
 952                        goto next_idx;
 953                }
 954        }
 955
 956        return next;
 957}
 958
 959static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
 960
 961static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 962{
 963        int first;
 964
 965        /* "this_cpu" is cheaper to preempt than a remote processor */
 966        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
 967                return this_cpu;
 968
 969        first = first_cpu(*mask);
 970        if (first != NR_CPUS)
 971                return first;
 972
 973        return -1;
 974}
 975
 976static int find_lowest_rq(struct task_struct *task)
 977{
 978        struct sched_domain *sd;
 979        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
 980        int this_cpu = smp_processor_id();
 981        int cpu      = task_cpu(task);
 982
 983        if (task->rt.nr_cpus_allowed == 1)
 984                return -1; /* No other targets possible */
 985
 986        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
 987                return -1; /* No targets found */
 988
 989        /*
 990         * Only consider CPUs that are usable for migration.
 991         * I guess we might want to change cpupri_find() to ignore those
 992         * in the first place.
 993         */
 994        cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
 995
 996        /*
 997         * At this point we have built a mask of cpus representing the
 998         * lowest priority tasks in the system.  Now we want to elect
 999         * the best one based on our affinity and topology.
1000         *
1001         * We prioritize the last cpu that the task executed on since
1002         * it is most likely cache-hot in that location.
1003         */
1004        if (cpu_isset(cpu, *lowest_mask))
1005                return cpu;
1006
1007        /*
1008         * Otherwise, we consult the sched_domains span maps to figure
1009         * out which cpu is logically closest to our hot cache data.
1010         */
1011        if (this_cpu == cpu)
1012                this_cpu = -1; /* Skip this_cpu opt if the same */
1013
1014        for_each_domain(cpu, sd) {
1015                if (sd->flags & SD_WAKE_AFFINE) {
1016                        cpumask_t domain_mask;
1017                        int       best_cpu;
1018
1019                        cpus_and(domain_mask, sd->span, *lowest_mask);
1020
1021                        best_cpu = pick_optimal_cpu(this_cpu,
1022                                                    &domain_mask);
1023                        if (best_cpu != -1)
1024                                return best_cpu;
1025                }
1026        }
1027
1028        /*
1029         * And finally, if there were no matches within the domains
1030         * just give the caller *something* to work with from the compatible
1031         * locations.
1032         */
1033        return pick_optimal_cpu(this_cpu, lowest_mask);
1034}
1035
1036/* Will lock the rq it finds */
1037static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1038{
1039        struct rq *lowest_rq = NULL;
1040        int tries;
1041        int cpu;
1042
1043        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1044                cpu = find_lowest_rq(task);
1045
1046                if ((cpu == -1) || (cpu == rq->cpu))
1047                        break;
1048
1049                lowest_rq = cpu_rq(cpu);
1050
1051                /* if the prio of this runqueue changed, try again */
1052                if (double_lock_balance(rq, lowest_rq)) {
1053                        /*
 1054                         * We had to unlock the run queue. In
 1055                         * the meantime, the task could have
1056                         * migrated already or had its affinity changed.
1057                         * Also make sure that it wasn't scheduled on its rq.
1058                         */
1059                        if (unlikely(task_rq(task) != rq ||
1060                                     !cpu_isset(lowest_rq->cpu,
1061                                                task->cpus_allowed) ||
1062                                     task_running(rq, task) ||
1063                                     !task->se.on_rq)) {
1064
1065                                spin_unlock(&lowest_rq->lock);
1066                                lowest_rq = NULL;
1067                                break;
1068                        }
1069                }
1070
1071                /* If this rq is still suitable use it. */
1072                if (lowest_rq->rt.highest_prio > task->prio)
1073                        break;
1074
1075                /* try again */
1076                double_unlock_balance(rq, lowest_rq);
1077                lowest_rq = NULL;
1078        }
1079
1080        return lowest_rq;
1081}
1082
1083/*
 1084 * If the current CPU has more than one RT task, see if the non-
1085 * running task can migrate over to a CPU that is running a task
1086 * of lesser priority.
1087 */
1088static int push_rt_task(struct rq *rq)
1089{
1090        struct task_struct *next_task;
1091        struct rq *lowest_rq;
1092        int ret = 0;
1093        int paranoid = RT_MAX_TRIES;
1094
1095        if (!rq->rt.overloaded)
1096                return 0;
1097
1098        next_task = pick_next_highest_task_rt(rq, -1);
1099        if (!next_task)
1100                return 0;
1101
1102 retry:
1103        if (unlikely(next_task == rq->curr)) {
1104                WARN_ON(1);
1105                return 0;
1106        }
1107
1108        /*
 1109         * It's possible that next_task slipped in with a
 1110         * higher priority than current. If that's the case,
 1111         * just reschedule current.
1112         */
1113        if (unlikely(next_task->prio < rq->curr->prio)) {
1114                resched_task(rq->curr);
1115                return 0;
1116        }
1117
1118        /* We might release rq lock */
1119        get_task_struct(next_task);
1120
1121        /* find_lock_lowest_rq locks the rq if found */
1122        lowest_rq = find_lock_lowest_rq(next_task, rq);
1123        if (!lowest_rq) {
1124                struct task_struct *task;
1125                /*
 1126                 * find_lock_lowest_rq releases rq->lock,
1127                 * so it is possible that next_task has changed.
1128                 * If it has, then try again.
1129                 */
1130                task = pick_next_highest_task_rt(rq, -1);
1131                if (unlikely(task != next_task) && task && paranoid--) {
1132                        put_task_struct(next_task);
1133                        next_task = task;
1134                        goto retry;
1135                }
1136                goto out;
1137        }
1138
1139        deactivate_task(rq, next_task, 0);
1140        set_task_cpu(next_task, lowest_rq->cpu);
1141        activate_task(lowest_rq, next_task, 0);
1142
1143        resched_task(lowest_rq->curr);
1144
1145        double_unlock_balance(rq, lowest_rq);
1146
1147        ret = 1;
1148out:
1149        put_task_struct(next_task);
1150
1151        return ret;
1152}
1153
1154/*
1155 * TODO: Currently we just use the second highest prio task on
1156 *       the queue, and stop when it can't migrate (or there's
1157 *       no more RT tasks).  There may be a case where a lower
1158 *       priority RT task has a different affinity than the
1159 *       higher RT task. In this case the lower RT task could
 1160         *       possibly be able to migrate whereas the higher priority
1161 *       RT task could not.  We currently ignore this issue.
1162 *       Enhancements are welcome!
1163 */
1164static void push_rt_tasks(struct rq *rq)
1165{
1166        /* push_rt_task will return true if it moved an RT */
1167        while (push_rt_task(rq))
1168                ;
1169}
1170
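/*
 * Pull side of RT balancing: scan the overloaded runqueues in our root
 * domain and grab the highest-priority pushable task that would
 * preempt whatever this_rq is about to run.  Returns 1 if at least one
 * task was pulled.
 */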
1171static int pull_rt_task(struct rq *this_rq)
1172{
1173        int this_cpu = this_rq->cpu, ret = 0, cpu;
1174        struct task_struct *p, *next;
1175        struct rq *src_rq;
1176
1177        if (likely(!rt_overloaded(this_rq)))
1178                return 0;
1179
1180        next = pick_next_task_rt(this_rq);
1181
1182        for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1183                if (this_cpu == cpu)
1184                        continue;
1185
1186                src_rq = cpu_rq(cpu);
1187                /*
1188                 * We can potentially drop this_rq's lock in
1189                 * double_lock_balance, and another CPU could
1190                 * steal our next task - hence we must cause
1191                 * the caller to recalculate the next task
1192                 * in that case:
1193                 */
1194                if (double_lock_balance(this_rq, src_rq)) {
1195                        struct task_struct *old_next = next;
1196
1197                        next = pick_next_task_rt(this_rq);
1198                        if (next != old_next)
1199                                ret = 1;
1200                }
1201
1202                /*
1203                 * Are there still pullable RT tasks?
1204                 */
1205                if (src_rq->rt.rt_nr_running <= 1)
1206                        goto skip;
1207
1208                p = pick_next_highest_task_rt(src_rq, this_cpu);
1209
1210                /*
1211                 * Do we have an RT task that preempts
1212                 * the to-be-scheduled task?
1213                 */
1214                if (p && (!next || (p->prio < next->prio))) {
1215                        WARN_ON(p == src_rq->curr);
1216                        WARN_ON(!p->se.on_rq);
1217
1218                        /*
1219                         * There's a chance that p is higher in priority
1220                         * than what's currently running on its cpu.
 1221                         * This is just that p is waking up and hasn't
1222                         * had a chance to schedule. We only pull
1223                         * p if it is lower in priority than the
1224                         * current task on the run queue or
 1225                         * this_rq's next task is lower in prio than
1226                         * the current task on that rq.
1227                         */
1228                        if (p->prio < src_rq->curr->prio ||
1229                            (next && next->prio < src_rq->curr->prio))
1230                                goto skip;
1231
1232                        ret = 1;
1233
1234                        deactivate_task(src_rq, p, 0);
1235                        set_task_cpu(p, this_cpu);
1236                        activate_task(this_rq, p, 0);
1237                        /*
1238                         * We continue with the search, just in
1239                         * case there's an even higher prio task
 1240                         * in another runqueue. (low likelihood
1241                         * but possible)
1242                         *
1243                         * Update next so that we won't pick a task
1244                         * on another cpu with a priority lower (or equal)
1245                         * than the one we just picked.
1246                         */
1247                        next = p;
1248
1249                }
1250 skip:
1251                double_unlock_balance(this_rq, src_rq);
1252        }
1253
1254        return ret;
1255}
1256
1257static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1258{
1259        /* Try to pull RT tasks here if we lower this rq's prio */
1260        if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1261                pull_rt_task(rq);
1262}
1263
1264static void post_schedule_rt(struct rq *rq)
1265{
1266        /*
1267         * If we have more than one rt_task queued, then
 1268         * see if we can push the other rt_tasks off to other CPUs.
 1269         * Note we may release the rq lock, and since
 1270         * the lock was owned by prev, we need to release it
 1271         * first via finish_lock_switch and then reacquire it here.
1272         */
1273        if (unlikely(rq->rt.overloaded)) {
1274                spin_lock_irq(&rq->lock);
1275                push_rt_tasks(rq);
1276                spin_unlock_irq(&rq->lock);
1277        }
1278}
1279
1280/*
1281 * If we are not running and we are not going to reschedule soon, we should
1282 * try to push tasks away now
1283 */
1284static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1285{
1286        if (!task_running(rq, p) &&
1287            !test_tsk_need_resched(rq->curr) &&
1288            rq->rt.overloaded)
1289                push_rt_tasks(rq);
1290}
1291
1292static unsigned long
1293load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1294                unsigned long max_load_move,
1295                struct sched_domain *sd, enum cpu_idle_type idle,
1296                int *all_pinned, int *this_best_prio)
1297{
1298        /* don't touch RT tasks */
1299        return 0;
1300}
1301
1302static int
1303move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1304                 struct sched_domain *sd, enum cpu_idle_type idle)
1305{
1306        /* don't touch RT tasks */
1307        return 0;
1308}
1309
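/*
 * Affinity updates can turn a pinned RT task into a migratable one (or
 * the reverse), so adjust rt_nr_migratory and the runqueue's overload
 * state before publishing the new mask and weight.
 */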
1310static void set_cpus_allowed_rt(struct task_struct *p,
1311                                const cpumask_t *new_mask)
1312{
1313        int weight = cpus_weight(*new_mask);
1314
1315        BUG_ON(!rt_task(p));
1316
1317        /*
1318         * Update the migration status of the RQ if we have an RT task
1319         * which is running AND changing its weight value.
1320         */
1321        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1322                struct rq *rq = task_rq(p);
1323
1324                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1325                        rq->rt.rt_nr_migratory++;
1326                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1327                        BUG_ON(!rq->rt.rt_nr_migratory);
1328                        rq->rt.rt_nr_migratory--;
1329                }
1330
1331                update_rt_migration(rq);
1332        }
1333
1334        p->cpus_allowed    = *new_mask;
1335        p->rt.nr_cpus_allowed = weight;
1336}
1337
1338/* Assumes rq->lock is held */
1339static void rq_online_rt(struct rq *rq)
1340{
1341        if (rq->rt.overloaded)
1342                rt_set_overload(rq);
1343
1344        __enable_runtime(rq);
1345
1346        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1347}
1348
1349/* Assumes rq->lock is held */
1350static void rq_offline_rt(struct rq *rq)
1351{
1352        if (rq->rt.overloaded)
1353                rt_clear_overload(rq);
1354
1355        __disable_runtime(rq);
1356
1357        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1358}
1359
1360/*
 1361 * When switching away from the rt queue, we bring ourselves to a position
 1362 * where we might want to pull RT tasks from other runqueues.
1363 */
1364static void switched_from_rt(struct rq *rq, struct task_struct *p,
1365                           int running)
1366{
1367        /*
1368         * If there are other RT tasks then we will reschedule
1369         * and the scheduling of the other RT tasks will handle
1370         * the balancing. But if we are the last RT task
1371         * we may need to handle the pulling of RT tasks
1372         * now.
1373         */
1374        if (!rq->rt.rt_nr_running)
1375                pull_rt_task(rq);
1376}
1377#endif /* CONFIG_SMP */
1378
1379/*
1380 * When switching a task to RT, we may overload the runqueue
1381 * with RT tasks. In this case we try to push them off to
1382 * other runqueues.
1383 */
1384static void switched_to_rt(struct rq *rq, struct task_struct *p,
1385                           int running)
1386{
1387        int check_resched = 1;
1388
1389        /*
1390         * If we are already running, then there's nothing
1391         * that needs to be done. But if we are not running
1392         * we may need to preempt the current running task.
1393         * If that current running task is also an RT task
1394         * then see if we can move to another run queue.
1395         */
1396        if (!running) {
1397#ifdef CONFIG_SMP
1398                if (rq->rt.overloaded && push_rt_task(rq) &&
1399                    /* Don't resched if we changed runqueues */
1400                    rq != task_rq(p))
1401                        check_resched = 0;
1402#endif /* CONFIG_SMP */
1403                if (check_resched && p->prio < rq->curr->prio)
1404                        resched_task(rq->curr);
1405        }
1406}
1407
1408/*
1409 * Priority of the task has changed. This may cause
1410 * us to initiate a push or pull.
1411 */
1412static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1413                            int oldprio, int running)
1414{
1415        if (running) {
1416#ifdef CONFIG_SMP
1417                /*
1418                 * If our priority decreases while running, we
1419                 * may need to pull tasks to this runqueue.
1420                 */
1421                if (oldprio < p->prio)
1422                        pull_rt_task(rq);
1423                /*
1424                 * If there's a higher priority task waiting to run
1425                 * then reschedule. Note, the above pull_rt_task
1426                 * can release the rq lock and p could migrate.
1427                 * Only reschedule if p is still on the same runqueue.
1428                 */
1429                if (p->prio > rq->rt.highest_prio && rq->curr == p)
1430                        resched_task(p);
1431#else
1432                /* For UP simply resched on drop of prio */
1433                if (oldprio < p->prio)
1434                        resched_task(p);
1435#endif /* CONFIG_SMP */
1436        } else {
1437                /*
1438                 * This task is not running, but if it is
1439                 * greater than the current running task
1440                 * then reschedule.
1441                 */
1442                if (p->prio < rq->curr->prio)
1443                        resched_task(rq->curr);
1444        }
1445}
1446
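/*
 * RLIMIT_RTTIME policing: count the ticks this RT task has run without
 * sleeping and, once the soft limit is reached, arm sched_exp so the
 * posix cpu-timer path can act on it.
 */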
1447static void watchdog(struct rq *rq, struct task_struct *p)
1448{
1449        unsigned long soft, hard;
1450
1451        if (!p->signal)
1452                return;
1453
1454        soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1455        hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1456
1457        if (soft != RLIM_INFINITY) {
1458                unsigned long next;
1459
1460                p->rt.timeout++;
1461                next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1462                if (p->rt.timeout > next)
1463                        p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1464        }
1465}
1466
1467static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1468{
1469        update_curr_rt(rq);
1470
1471        watchdog(rq, p);
1472
1473        /*
1474         * RR tasks need a special form of timeslice management.
1475         * FIFO tasks have no timeslices.
1476         */
1477        if (p->policy != SCHED_RR)
1478                return;
1479
1480        if (--p->rt.time_slice)
1481                return;
1482
1483        p->rt.time_slice = DEF_TIMESLICE;
1484
1485        /*
1486         * Requeue to the end of queue if we are not the only element
1487         * on the queue:
1488         */
1489        if (p->rt.run_list.prev != p->rt.run_list.next) {
1490                requeue_task_rt(rq, p, 0);
1491                set_tsk_need_resched(p);
1492        }
1493}
1494
1495static void set_curr_task_rt(struct rq *rq)
1496{
1497        struct task_struct *p = rq->curr;
1498
1499        p->se.exec_start = rq->clock;
1500}
1501
1502static const struct sched_class rt_sched_class = {
1503        .next                   = &fair_sched_class,
1504        .enqueue_task           = enqueue_task_rt,
1505        .dequeue_task           = dequeue_task_rt,
1506        .yield_task             = yield_task_rt,
1507
1508        .check_preempt_curr     = check_preempt_curr_rt,
1509
1510        .pick_next_task         = pick_next_task_rt,
1511        .put_prev_task          = put_prev_task_rt,
1512
1513#ifdef CONFIG_SMP
1514        .select_task_rq         = select_task_rq_rt,
1515
1516        .load_balance           = load_balance_rt,
1517        .move_one_task          = move_one_task_rt,
1518        .set_cpus_allowed       = set_cpus_allowed_rt,
1519        .rq_online              = rq_online_rt,
1520        .rq_offline             = rq_offline_rt,
1521        .pre_schedule           = pre_schedule_rt,
1522        .post_schedule          = post_schedule_rt,
1523        .task_wake_up           = task_wake_up_rt,
1524        .switched_from          = switched_from_rt,
1525#endif
1526
1527        .set_curr_task          = set_curr_task_rt,
1528        .task_tick              = task_tick_rt,
1529
1530        .prio_changed           = prio_changed_rt,
1531        .switched_to            = switched_to_rt,
1532};
1533
1534#ifdef CONFIG_SCHED_DEBUG
1535extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1536
1537static void print_rt_stats(struct seq_file *m, int cpu)
1538{
1539        struct rt_rq *rt_rq;
1540
1541        rcu_read_lock();
1542        for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1543                print_rt_rq(m, cpu, rt_rq);
1544        rcu_read_unlock();
1545}
1546#endif /* CONFIG_SCHED_DEBUG */
1547